quick_trampoline_entrypoints.cc revision 575d3e60c68b5cf481b615dde4a16283507b19ed
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "art_method-inl.h" 18#include "base/enums.h" 19#include "callee_save_frame.h" 20#include "common_throws.h" 21#include "dex_file-inl.h" 22#include "dex_instruction-inl.h" 23#include "entrypoints/entrypoint_utils-inl.h" 24#include "entrypoints/runtime_asm_entrypoints.h" 25#include "gc/accounting/card_table-inl.h" 26#include "imt_conflict_table.h" 27#include "imtable-inl.h" 28#include "interpreter/interpreter.h" 29#include "linear_alloc.h" 30#include "method_handles.h" 31#include "method_reference.h" 32#include "mirror/class-inl.h" 33#include "mirror/dex_cache-inl.h" 34#include "mirror/method.h" 35#include "mirror/method_handle_impl.h" 36#include "mirror/object-inl.h" 37#include "mirror/object_array-inl.h" 38#include "oat_quick_method_header.h" 39#include "quick_exception_handler.h" 40#include "runtime.h" 41#include "scoped_thread_state_change-inl.h" 42#include "stack.h" 43#include "debugger.h" 44#include "well_known_classes.h" 45 46namespace art { 47 48// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. 49class QuickArgumentVisitor { 50 // Number of bytes for each out register in the caller method's frame. 51 static constexpr size_t kBytesStackArgLocation = 4; 52 // Frame size in bytes of a callee-save frame for RefsAndArgs. 53 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 54 GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs); 55#if defined(__arm__) 56 // The callee save frame is pointed to by SP. 57 // | argN | | 58 // | ... | | 59 // | arg4 | | 60 // | arg3 spill | | Caller's frame 61 // | arg2 spill | | 62 // | arg1 spill | | 63 // | Method* | --- 64 // | LR | 65 // | ... | 4x6 bytes callee saves 66 // | R3 | 67 // | R2 | 68 // | R1 | 69 // | S15 | 70 // | : | 71 // | S0 | 72 // | | 4x2 bytes padding 73 // | Method* | <- sp 74 static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat; 75 static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat; 76 static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat; 77 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat; 78 static constexpr bool kQuickSkipOddFpRegisters = false; 79 static constexpr size_t kNumQuickGprArgs = 3; 80 static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16; 81 static constexpr bool kGprFprLockstep = false; 82 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 83 arm::ArmCalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first FPR arg. 84 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 85 arm::ArmCalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first GPR arg. 86 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 87 arm::ArmCalleeSaveLrOffset(Runtime::kSaveRefsAndArgs); // Offset of return address. 
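  // Illustrative note (not part of the original source): given the frame layout above, the
  // spilled value of quick GPR argument i can be recovered from the frame pointer as
  //
  //   uint8_t* base = reinterpret_cast<uint8_t*>(sp);
  //   uint8_t* gpr_arg_i = base + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
  //                        GprIndexToGprOffset(i);
  //
  // GetProxyThisObject() below uses exactly this computation with i == 0 to read the 'this'
  // reference of a proxy method out of the callee-save frame.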
88 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 89 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 90 } 91#elif defined(__aarch64__) 92 // The callee save frame is pointed to by SP. 93 // | argN | | 94 // | ... | | 95 // | arg4 | | 96 // | arg3 spill | | Caller's frame 97 // | arg2 spill | | 98 // | arg1 spill | | 99 // | Method* | --- 100 // | LR | 101 // | X29 | 102 // | : | 103 // | X20 | 104 // | X7 | 105 // | : | 106 // | X1 | 107 // | D7 | 108 // | : | 109 // | D0 | 110 // | | padding 111 // | Method* | <- sp 112 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 113 static constexpr bool kAlignPairRegister = false; 114 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 115 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 116 static constexpr bool kQuickSkipOddFpRegisters = false; 117 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 118 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 119 static constexpr bool kGprFprLockstep = false; 120 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 121 arm64::Arm64CalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first FPR arg. 122 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 123 arm64::Arm64CalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first GPR arg. 124 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 125 arm64::Arm64CalleeSaveLrOffset(Runtime::kSaveRefsAndArgs); // Offset of return address. 126 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 127 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 128 } 129#elif defined(__mips__) && !defined(__LP64__) 130 // The callee save frame is pointed to by SP. 131 // | argN | | 132 // | ... | | 133 // | arg4 | | 134 // | arg3 spill | | Caller's frame 135 // | arg2 spill | | 136 // | arg1 spill | | 137 // | Method* | --- 138 // | RA | 139 // | ... | callee saves 140 // | T1 | arg5 141 // | T0 | arg4 142 // | A3 | arg3 143 // | A2 | arg2 144 // | A1 | arg1 145 // | F19 | 146 // | F18 | f_arg5 147 // | F17 | 148 // | F16 | f_arg4 149 // | F15 | 150 // | F14 | f_arg3 151 // | F13 | 152 // | F12 | f_arg2 153 // | F11 | 154 // | F10 | f_arg1 155 // | F9 | 156 // | F8 | f_arg0 157 // | | padding 158 // | A0/Method* | <- sp 159 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 160 static constexpr bool kAlignPairRegister = true; 161 static constexpr bool kQuickSoftFloatAbi = false; 162 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 163 static constexpr bool kQuickSkipOddFpRegisters = true; 164 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 165 static constexpr size_t kNumQuickFprArgs = 12; // 6 arguments passed in FPRs. Floats can be 166 // passed only in even numbered registers and each 167 // double occupies two registers. 168 static constexpr bool kGprFprLockstep = false; 169 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8; // Offset of first FPR arg. 170 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56; // Offset of first GPR arg. 171 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108; // Offset of return address. 
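  // Illustrative note (not part of the original source): because kQuickSkipOddFpRegisters is
  // true here, a float argument only ever lands in an even-numbered register (F8, F10, ...)
  // and the following odd register is left unused, while a double always occupies an even/odd
  // pair (e.g. f_arg0 in F8/F9 above). That is why kNumQuickFprArgs is 12 registers for at
  // most 6 floating-point arguments.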
172 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 173 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 174 } 175#elif defined(__mips__) && defined(__LP64__) 176 // The callee save frame is pointed to by SP. 177 // | argN | | 178 // | ... | | 179 // | arg4 | | 180 // | arg3 spill | | Caller's frame 181 // | arg2 spill | | 182 // | arg1 spill | | 183 // | Method* | --- 184 // | RA | 185 // | ... | callee saves 186 // | A7 | arg7 187 // | A6 | arg6 188 // | A5 | arg5 189 // | A4 | arg4 190 // | A3 | arg3 191 // | A2 | arg2 192 // | A1 | arg1 193 // | F19 | f_arg7 194 // | F18 | f_arg6 195 // | F17 | f_arg5 196 // | F16 | f_arg4 197 // | F15 | f_arg3 198 // | F14 | f_arg2 199 // | F13 | f_arg1 200 // | F12 | f_arg0 201 // | | padding 202 // | A0/Method* | <- sp 203 // NOTE: for Mip64, when A0 is skipped, F12 is also skipped. 204 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 205 static constexpr bool kAlignPairRegister = false; 206 static constexpr bool kQuickSoftFloatAbi = false; 207 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 208 static constexpr bool kQuickSkipOddFpRegisters = false; 209 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 210 static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs. 211 static constexpr bool kGprFprLockstep = true; 212 213 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24; // Offset of first FPR arg (F13). 214 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80; // Offset of first GPR arg (A1). 215 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200; // Offset of return address. 216 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 217 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 218 } 219#elif defined(__i386__) 220 // The callee save frame is pointed to by SP. 221 // | argN | | 222 // | ... | | 223 // | arg4 | | 224 // | arg3 spill | | Caller's frame 225 // | arg2 spill | | 226 // | arg1 spill | | 227 // | Method* | --- 228 // | Return | 229 // | EBP,ESI,EDI | callee saves 230 // | EBX | arg3 231 // | EDX | arg2 232 // | ECX | arg1 233 // | XMM3 | float arg 4 234 // | XMM2 | float arg 3 235 // | XMM1 | float arg 2 236 // | XMM0 | float arg 1 237 // | EAX/Method* | <- sp 238 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 239 static constexpr bool kAlignPairRegister = false; 240 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 241 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 242 static constexpr bool kQuickSkipOddFpRegisters = false; 243 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs. 244 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs. 245 static constexpr bool kGprFprLockstep = false; 246 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4; // Offset of first FPR arg. 247 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8; // Offset of first GPR arg. 248 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8; // Offset of return address. 249 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 250 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 251 } 252#elif defined(__x86_64__) 253 // The callee save frame is pointed to by SP. 254 // | argN | | 255 // | ... | | 256 // | reg. 
arg spills | | Caller's frame 257 // | Method* | --- 258 // | Return | 259 // | R15 | callee save 260 // | R14 | callee save 261 // | R13 | callee save 262 // | R12 | callee save 263 // | R9 | arg5 264 // | R8 | arg4 265 // | RSI/R6 | arg1 266 // | RBP/R5 | callee save 267 // | RBX/R3 | callee save 268 // | RDX/R2 | arg2 269 // | RCX/R1 | arg3 270 // | XMM7 | float arg 8 271 // | XMM6 | float arg 7 272 // | XMM5 | float arg 6 273 // | XMM4 | float arg 5 274 // | XMM3 | float arg 4 275 // | XMM2 | float arg 3 276 // | XMM1 | float arg 2 277 // | XMM0 | float arg 1 278 // | Padding | 279 // | RDI/Method* | <- sp 280 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 281 static constexpr bool kAlignPairRegister = false; 282 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 283 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 284 static constexpr bool kQuickSkipOddFpRegisters = false; 285 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 286 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 287 static constexpr bool kGprFprLockstep = false; 288 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg. 289 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8; // Offset of first GPR arg. 290 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8; // Offset of return address. 291 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 292 switch (gpr_index) { 293 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA)); 294 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA)); 295 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA)); 296 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA)); 297 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA)); 298 default: 299 LOG(FATAL) << "Unexpected GPR index: " << gpr_index; 300 return 0; 301 } 302 } 303#else 304#error "Unsupported architecture" 305#endif 306 307 public: 308 // Special handling for proxy methods. Proxy methods are instance methods so the 309 // 'this' object is the 1st argument. They also have the same frame layout as the 310 // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the 311 // 1st GPR. 312 static mirror::Object* GetProxyThisObject(ArtMethod** sp) 313 REQUIRES_SHARED(Locks::mutator_lock_) { 314 CHECK((*sp)->IsProxyMethod()); 315 CHECK_GT(kNumQuickGprArgs, 0u); 316 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR. 
317 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + 318 GprIndexToGprOffset(kThisGprIndex); 319 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset; 320 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr(); 321 } 322 323 static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 324 DCHECK((*sp)->IsCalleeSaveMethod()); 325 return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs); 326 } 327 328 static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 329 DCHECK((*sp)->IsCalleeSaveMethod()); 330 uint8_t* previous_sp = 331 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; 332 return *reinterpret_cast<ArtMethod**>(previous_sp); 333 } 334 335 static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 336 DCHECK((*sp)->IsCalleeSaveMethod()); 337 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs); 338 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 339 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 340 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 341 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 342 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 343 344 if (current_code->IsOptimized()) { 345 CodeInfo code_info = current_code->GetOptimizedCodeInfo(); 346 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 347 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding); 348 DCHECK(stack_map.IsValid()); 349 if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) { 350 InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding); 351 return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 352 inline_info.GetDepth(encoding.inline_info.encoding)-1); 353 } else { 354 return stack_map.GetDexPc(encoding.stack_map.encoding); 355 } 356 } else { 357 return current_code->ToDexPc(*caller_sp, outer_pc); 358 } 359 } 360 361 // For the given quick ref and args quick frame, return the caller's PC. 362 static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 363 DCHECK((*sp)->IsCalleeSaveMethod()); 364 uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset; 365 return *reinterpret_cast<uintptr_t*>(lr); 366 } 367 368 QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 369 uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) : 370 is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), 371 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset), 372 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset), 373 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize 374 + sizeof(ArtMethod*)), // Skip ArtMethod*. 375 gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0), 376 cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) { 377 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), 378 "Number of Quick FPR arguments unexpected"); 379 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled), 380 "Double alignment unexpected"); 381 // For register alignment, we want to assume that counters(fpr_double_index_) are even if the 382 // next register is even. 
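    // For example (not part of the original source), on 32-bit ARM with the hard-float quick
    // ABI, kQuickDoubleRegAlignedFloatBackFilled is true and kNumQuickFprArgs == 16, so the
    // assertion below holds and fpr_double_index_ can always stay aligned to an even register.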
383 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0, 384 "Number of Quick FPR arguments not even"); 385 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 386 } 387 388 virtual ~QuickArgumentVisitor() {} 389 390 virtual void Visit() = 0; 391 392 Primitive::Type GetParamPrimitiveType() const { 393 return cur_type_; 394 } 395 396 uint8_t* GetParamAddress() const { 397 if (!kQuickSoftFloatAbi) { 398 Primitive::Type type = GetParamPrimitiveType(); 399 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) { 400 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) { 401 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 402 return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 403 } 404 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 405 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 406 } 407 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 408 } 409 } 410 if (gpr_index_ < kNumQuickGprArgs) { 411 return gpr_args_ + GprIndexToGprOffset(gpr_index_); 412 } 413 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 414 } 415 416 bool IsSplitLongOrDouble() const { 417 if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || 418 (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) { 419 return is_split_long_or_double_; 420 } else { 421 return false; // An optimization for when GPR and FPRs are 64bit. 422 } 423 } 424 425 bool IsParamAReference() const { 426 return GetParamPrimitiveType() == Primitive::kPrimNot; 427 } 428 429 bool IsParamALongOrDouble() const { 430 Primitive::Type type = GetParamPrimitiveType(); 431 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; 432 } 433 434 uint64_t ReadSplitLongParam() const { 435 // The splitted long is always available through the stack. 436 return *reinterpret_cast<uint64_t*>(stack_args_ 437 + stack_index_ * kBytesStackArgLocation); 438 } 439 440 void IncGprIndex() { 441 gpr_index_++; 442 if (kGprFprLockstep) { 443 fpr_index_++; 444 } 445 } 446 447 void IncFprIndex() { 448 fpr_index_++; 449 if (kGprFprLockstep) { 450 gpr_index_++; 451 } 452 } 453 454 void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) { 455 // (a) 'stack_args_' should point to the first method's argument 456 // (b) whatever the argument type it is, the 'stack_index_' should 457 // be moved forward along with every visiting. 458 gpr_index_ = 0; 459 fpr_index_ = 0; 460 if (kQuickDoubleRegAlignedFloatBackFilled) { 461 fpr_double_index_ = 0; 462 } 463 stack_index_ = 0; 464 if (!is_static_) { // Handle this. 
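      // Illustrative walk-through (not part of the original source): for a virtual method
      // void foo(Object o, long l) the shorty is "VLJ" (return type first, then parameters).
      // VisitArguments() first visits the implicit 'this' reference here, then 'L' as a
      // reference and 'J' as a long in the loop below; on 32-bit ABIs the long may be
      // reported as split across a register and the stack (see is_split_long_or_double_).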
465 cur_type_ = Primitive::kPrimNot; 466 is_split_long_or_double_ = false; 467 Visit(); 468 stack_index_++; 469 if (kNumQuickGprArgs > 0) { 470 IncGprIndex(); 471 } 472 } 473 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) { 474 cur_type_ = Primitive::GetType(shorty_[shorty_index]); 475 switch (cur_type_) { 476 case Primitive::kPrimNot: 477 case Primitive::kPrimBoolean: 478 case Primitive::kPrimByte: 479 case Primitive::kPrimChar: 480 case Primitive::kPrimShort: 481 case Primitive::kPrimInt: 482 is_split_long_or_double_ = false; 483 Visit(); 484 stack_index_++; 485 if (gpr_index_ < kNumQuickGprArgs) { 486 IncGprIndex(); 487 } 488 break; 489 case Primitive::kPrimFloat: 490 is_split_long_or_double_ = false; 491 Visit(); 492 stack_index_++; 493 if (kQuickSoftFloatAbi) { 494 if (gpr_index_ < kNumQuickGprArgs) { 495 IncGprIndex(); 496 } 497 } else { 498 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 499 IncFprIndex(); 500 if (kQuickDoubleRegAlignedFloatBackFilled) { 501 // Double should not overlap with float. 502 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4. 503 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2)); 504 // Float should not overlap with double. 505 if (fpr_index_ % 2 == 0) { 506 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 507 } 508 } else if (kQuickSkipOddFpRegisters) { 509 IncFprIndex(); 510 } 511 } 512 } 513 break; 514 case Primitive::kPrimDouble: 515 case Primitive::kPrimLong: 516 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) { 517 if (cur_type_ == Primitive::kPrimLong && 518#if defined(__mips__) && !defined(__LP64__) 519 (gpr_index_ == 0 || gpr_index_ == 2) && 520#else 521 gpr_index_ == 0 && 522#endif 523 kAlignPairRegister) { 524 // Currently, this is only for ARM and MIPS, where we align long parameters with 525 // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using 526 // R2 (on ARM) or A2(T0) (on MIPS) instead. 527 IncGprIndex(); 528 } 529 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) && 530 ((gpr_index_ + 1) == kNumQuickGprArgs); 531 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) { 532 // We don't want to split this. Pass over this register. 533 gpr_index_++; 534 is_split_long_or_double_ = false; 535 } 536 Visit(); 537 if (kBytesStackArgLocation == 4) { 538 stack_index_+= 2; 539 } else { 540 CHECK_EQ(kBytesStackArgLocation, 8U); 541 stack_index_++; 542 } 543 if (gpr_index_ < kNumQuickGprArgs) { 544 IncGprIndex(); 545 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) { 546 if (gpr_index_ < kNumQuickGprArgs) { 547 IncGprIndex(); 548 } 549 } 550 } 551 } else { 552 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) && 553 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled; 554 Visit(); 555 if (kBytesStackArgLocation == 4) { 556 stack_index_+= 2; 557 } else { 558 CHECK_EQ(kBytesStackArgLocation, 8U); 559 stack_index_++; 560 } 561 if (kQuickDoubleRegAlignedFloatBackFilled) { 562 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 563 fpr_double_index_ += 2; 564 // Float should not overlap with double. 
565 if (fpr_index_ % 2 == 0) { 566 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 567 } 568 } 569 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 570 IncFprIndex(); 571 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) { 572 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 573 IncFprIndex(); 574 } 575 } 576 } 577 } 578 break; 579 default: 580 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_; 581 } 582 } 583 } 584 585 protected: 586 const bool is_static_; 587 const char* const shorty_; 588 const uint32_t shorty_len_; 589 590 private: 591 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame. 592 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame. 593 uint8_t* const stack_args_; // Address of stack arguments in caller's frame. 594 uint32_t gpr_index_; // Index into spilled GPRs. 595 // Index into spilled FPRs. 596 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_ 597 // holds a higher register number. 598 uint32_t fpr_index_; 599 // Index into spilled FPRs for aligned double. 600 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in 601 // terms of singles, may be behind fpr_index. 602 uint32_t fpr_double_index_; 603 uint32_t stack_index_; // Index into arguments on the stack. 604 // The current type of argument during VisitArguments. 605 Primitive::Type cur_type_; 606 // Does a 64bit parameter straddle the register and stack arguments? 607 bool is_split_long_or_double_; 608}; 609 610// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It 611// allows to use the QuickArgumentVisitor constants without moving all the code in its own module. 612extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) 613 REQUIRES_SHARED(Locks::mutator_lock_) { 614 return QuickArgumentVisitor::GetProxyThisObject(sp); 615} 616 617// Visits arguments on the stack placing them into the shadow frame. 618class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor { 619 public: 620 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty, 621 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : 622 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} 623 624 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 625 626 private: 627 ShadowFrame* const sf_; 628 uint32_t cur_reg_; 629 630 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor); 631}; 632 633void BuildQuickShadowFrameVisitor::Visit() { 634 Primitive::Type type = GetParamPrimitiveType(); 635 switch (type) { 636 case Primitive::kPrimLong: // Fall-through. 637 case Primitive::kPrimDouble: 638 if (IsSplitLongOrDouble()) { 639 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam()); 640 } else { 641 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress())); 642 } 643 ++cur_reg_; 644 break; 645 case Primitive::kPrimNot: { 646 StackReference<mirror::Object>* stack_ref = 647 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 648 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr()); 649 } 650 break; 651 case Primitive::kPrimBoolean: // Fall-through. 652 case Primitive::kPrimByte: // Fall-through. 653 case Primitive::kPrimChar: // Fall-through. 654 case Primitive::kPrimShort: // Fall-through. 655 case Primitive::kPrimInt: // Fall-through. 
656 case Primitive::kPrimFloat: 657 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress())); 658 break; 659 case Primitive::kPrimVoid: 660 LOG(FATAL) << "UNREACHABLE"; 661 UNREACHABLE(); 662 } 663 ++cur_reg_; 664} 665 666extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) 667 REQUIRES_SHARED(Locks::mutator_lock_) { 668 // Ensure we don't get thread suspension until the object arguments are safely in the shadow 669 // frame. 670 ScopedQuickEntrypointChecks sqec(self); 671 672 if (UNLIKELY(!method->IsInvokable())) { 673 method->ThrowInvocationTimeError(); 674 return 0; 675 } 676 677 JValue tmp_value; 678 ShadowFrame* deopt_frame = self->PopStackedShadowFrame( 679 StackedShadowFrameType::kDeoptimizationShadowFrame, false); 680 ManagedStack fragment; 681 682 DCHECK(!method->IsNative()) << method->PrettyMethod(); 683 uint32_t shorty_len = 0; 684 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 685 const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem(); 686 DCHECK(code_item != nullptr) << method->PrettyMethod(); 687 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 688 689 JValue result; 690 691 if (deopt_frame != nullptr) { 692 // Coming from partial-fragment deopt. 693 694 if (kIsDebugBuild) { 695 // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom 696 // of the call-stack) corresponds to the called method. 697 ShadowFrame* linked = deopt_frame; 698 while (linked->GetLink() != nullptr) { 699 linked = linked->GetLink(); 700 } 701 CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " " 702 << ArtMethod::PrettyMethod(linked->GetMethod()); 703 } 704 705 if (VLOG_IS_ON(deopt)) { 706 // Print out the stack to verify that it was a partial-fragment deopt. 707 LOG(INFO) << "Continue-ing from deopt. Stack is:"; 708 QuickExceptionHandler::DumpFramesWithType(self, true); 709 } 710 711 ObjPtr<mirror::Throwable> pending_exception; 712 bool from_code = false; 713 self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code); 714 715 // Push a transition back into managed code onto the linked list in thread. 716 self->PushManagedStackFragment(&fragment); 717 718 // Ensure that the stack is still in order. 719 if (kIsDebugBuild) { 720 class DummyStackVisitor : public StackVisitor { 721 public: 722 explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_) 723 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} 724 725 bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { 726 // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking 727 // logic. Just always say we want to continue. 728 return true; 729 } 730 }; 731 DummyStackVisitor dsv(self); 732 dsv.WalkStack(); 733 } 734 735 // Restore the exception that was pending before deoptimization then interpret the 736 // deoptimized frames. 737 if (pending_exception != nullptr) { 738 self->SetException(pending_exception); 739 } 740 interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, from_code, &result); 741 } else { 742 const char* old_cause = self->StartAssertNoThreadSuspension( 743 "Building interpreter shadow frame"); 744 uint16_t num_regs = code_item->registers_size_; 745 // No last shadow coming from quick. 
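    // Worked example (not part of the original source): for a method whose code item has
    // registers_size_ == 6 and ins_size_ == 2, the two incoming arguments occupy the top of
    // the register frame, so first_arg_reg == 6 - 2 == 4 and the visitor below copies the
    // caller's quick arguments into vregs 4 and 5 of the new shadow frame.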
746 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 747 CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0); 748 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 749 size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; 750 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len, 751 shadow_frame, first_arg_reg); 752 shadow_frame_builder.VisitArguments(); 753 const bool needs_initialization = 754 method->IsStatic() && !method->GetDeclaringClass()->IsInitialized(); 755 // Push a transition back into managed code onto the linked list in thread. 756 self->PushManagedStackFragment(&fragment); 757 self->PushShadowFrame(shadow_frame); 758 self->EndAssertNoThreadSuspension(old_cause); 759 760 if (needs_initialization) { 761 // Ensure static method's class is initialized. 762 StackHandleScope<1> hs(self); 763 Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass())); 764 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) { 765 DCHECK(Thread::Current()->IsExceptionPending()) 766 << shadow_frame->GetMethod()->PrettyMethod(); 767 self->PopManagedStackFragment(fragment); 768 return 0; 769 } 770 } 771 772 result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame); 773 } 774 775 // Pop transition. 776 self->PopManagedStackFragment(fragment); 777 778 // Request a stack deoptimization if needed 779 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); 780 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp); 781 // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization 782 // should be done and it knows the real return pc. 783 if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) && 784 Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) { 785 if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) { 786 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " 787 << caller->PrettyMethod(); 788 } else { 789 // Push the context of the deoptimization stack so we can restore the return value and the 790 // exception before executing the deoptimized frames. 791 self->PushDeoptimizationContext( 792 result, shorty[0] == 'L', /* from_code */ false, self->GetException()); 793 794 // Set special exception to cause deoptimization. 795 self->SetException(Thread::GetDeoptimizationException()); 796 } 797 } 798 799 // No need to restore the args since the method has already been run by the interpreter. 800 return result.GetJ(); 801} 802 803// Visits arguments on the stack placing them into the args vector, Object* arguments are converted 804// to jobjects. 805class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor { 806 public: 807 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len, 808 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : 809 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} 810 811 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 812 813 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 814 815 private: 816 ScopedObjectAccessUnchecked* const soa_; 817 std::vector<jvalue>* const args_; 818 // References which we must update when exiting in case the GC moved the objects. 
819 std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_; 820 821 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); 822}; 823 824void BuildQuickArgumentVisitor::Visit() { 825 jvalue val; 826 Primitive::Type type = GetParamPrimitiveType(); 827 switch (type) { 828 case Primitive::kPrimNot: { 829 StackReference<mirror::Object>* stack_ref = 830 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 831 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 832 references_.push_back(std::make_pair(val.l, stack_ref)); 833 break; 834 } 835 case Primitive::kPrimLong: // Fall-through. 836 case Primitive::kPrimDouble: 837 if (IsSplitLongOrDouble()) { 838 val.j = ReadSplitLongParam(); 839 } else { 840 val.j = *reinterpret_cast<jlong*>(GetParamAddress()); 841 } 842 break; 843 case Primitive::kPrimBoolean: // Fall-through. 844 case Primitive::kPrimByte: // Fall-through. 845 case Primitive::kPrimChar: // Fall-through. 846 case Primitive::kPrimShort: // Fall-through. 847 case Primitive::kPrimInt: // Fall-through. 848 case Primitive::kPrimFloat: 849 val.i = *reinterpret_cast<jint*>(GetParamAddress()); 850 break; 851 case Primitive::kPrimVoid: 852 LOG(FATAL) << "UNREACHABLE"; 853 UNREACHABLE(); 854 } 855 args_->push_back(val); 856} 857 858void BuildQuickArgumentVisitor::FixupReferences() { 859 // Fixup any references which may have changed. 860 for (const auto& pair : references_) { 861 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 862 soa_->Env()->DeleteLocalRef(pair.first); 863 } 864} 865 866// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method 867// which is responsible for recording callee save registers. We explicitly place into jobjects the 868// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a 869// field within the proxy object, which will box the primitive arguments and deal with error cases. 870extern "C" uint64_t artQuickProxyInvokeHandler( 871 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) 872 REQUIRES_SHARED(Locks::mutator_lock_) { 873 DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod(); 874 DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod(); 875 // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 876 const char* old_cause = 877 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); 878 // Register the top of the managed stack, making stack crawlable. 879 DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod(); 880 self->VerifyStack(); 881 // Start new JNI local reference state. 882 JNIEnvExt* env = self->GetJniEnv(); 883 ScopedObjectAccessUnchecked soa(env); 884 ScopedJniEnvLocalRefState env_state(env); 885 // Create local ref. copies of proxy method and the receiver. 886 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver); 887 888 // Placing arguments into args vector and remove the receiver. 
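  // Note (not part of the original source): the receiver is dropped from 'args' below because
  // it is passed to InvokeProxyInvocationHandler() separately as rcvr_jobj; only the remaining
  // parameters are boxed by the invocation handler.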
889 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 890 CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " " 891 << non_proxy_method->PrettyMethod(); 892 std::vector<jvalue> args; 893 uint32_t shorty_len = 0; 894 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 895 BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args); 896 897 local_ref_visitor.VisitArguments(); 898 DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod(); 899 args.erase(args.begin()); 900 901 // Convert proxy method into expected interface method. 902 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize); 903 DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod(); 904 DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod(); 905 self->EndAssertNoThreadSuspension(old_cause); 906 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 907 DCHECK(!Runtime::Current()->IsActiveTransaction()); 908 jobject interface_method_jobj = soa.AddLocalReference<jobject>( 909 mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), 910 interface_method)); 911 912 // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code 913 // that performs allocations. 914 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); 915 // Restore references which might have moved. 916 local_ref_visitor.FixupReferences(); 917 return result.GetJ(); 918} 919 920// Read object references held in arguments from quick frames and place in a JNI local references, 921// so they don't get garbage collected. 922class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { 923 public: 924 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 925 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : 926 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} 927 928 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 929 930 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 931 932 private: 933 ScopedObjectAccessUnchecked* const soa_; 934 // References which we must update when exiting in case the GC moved the objects. 935 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_; 936 937 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor); 938}; 939 940void RememberForGcArgumentVisitor::Visit() { 941 if (IsParamAReference()) { 942 StackReference<mirror::Object>* stack_ref = 943 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 944 jobject reference = 945 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 946 references_.push_back(std::make_pair(reference, stack_ref)); 947 } 948} 949 950void RememberForGcArgumentVisitor::FixupReferences() { 951 // Fixup any references which may have changed. 952 for (const auto& pair : references_) { 953 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 954 soa_->Env()->DeleteLocalRef(pair.first); 955 } 956} 957 958// Lazily resolve a method for quick. Called by stub code. 959extern "C" const void* artQuickResolutionTrampoline( 960 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) 961 REQUIRES_SHARED(Locks::mutator_lock_) { 962 // The resolution trampoline stashes the resolved method into the callee-save frame to transport 963 // it. 
Thus, when exiting, the stack cannot be verified (as the resolved method most likely 964 // does not have the same stack layout as the callee-save method). 965 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 966 // Start new JNI local reference state 967 JNIEnvExt* env = self->GetJniEnv(); 968 ScopedObjectAccessUnchecked soa(env); 969 ScopedJniEnvLocalRefState env_state(env); 970 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up"); 971 972 // Compute details about the called method (avoid GCs) 973 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 974 InvokeType invoke_type; 975 MethodReference called_method(nullptr, 0); 976 const bool called_method_known_on_entry = !called->IsRuntimeMethod(); 977 ArtMethod* caller = nullptr; 978 if (!called_method_known_on_entry) { 979 caller = QuickArgumentVisitor::GetCallingMethod(sp); 980 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 981 const DexFile::CodeItem* code; 982 called_method.dex_file = caller->GetDexFile(); 983 code = caller->GetCodeItem(); 984 CHECK_LT(dex_pc, code->insns_size_in_code_units_); 985 const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); 986 Instruction::Code instr_code = instr->Opcode(); 987 bool is_range; 988 switch (instr_code) { 989 case Instruction::INVOKE_DIRECT: 990 invoke_type = kDirect; 991 is_range = false; 992 break; 993 case Instruction::INVOKE_DIRECT_RANGE: 994 invoke_type = kDirect; 995 is_range = true; 996 break; 997 case Instruction::INVOKE_STATIC: 998 invoke_type = kStatic; 999 is_range = false; 1000 break; 1001 case Instruction::INVOKE_STATIC_RANGE: 1002 invoke_type = kStatic; 1003 is_range = true; 1004 break; 1005 case Instruction::INVOKE_SUPER: 1006 invoke_type = kSuper; 1007 is_range = false; 1008 break; 1009 case Instruction::INVOKE_SUPER_RANGE: 1010 invoke_type = kSuper; 1011 is_range = true; 1012 break; 1013 case Instruction::INVOKE_VIRTUAL: 1014 invoke_type = kVirtual; 1015 is_range = false; 1016 break; 1017 case Instruction::INVOKE_VIRTUAL_RANGE: 1018 invoke_type = kVirtual; 1019 is_range = true; 1020 break; 1021 case Instruction::INVOKE_INTERFACE: 1022 invoke_type = kInterface; 1023 is_range = false; 1024 break; 1025 case Instruction::INVOKE_INTERFACE_RANGE: 1026 invoke_type = kInterface; 1027 is_range = true; 1028 break; 1029 default: 1030 LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr); 1031 UNREACHABLE(); 1032 } 1033 called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); 1034 } else { 1035 invoke_type = kStatic; 1036 called_method.dex_file = called->GetDexFile(); 1037 called_method.dex_method_index = called->GetDexMethodIndex(); 1038 } 1039 uint32_t shorty_len; 1040 const char* shorty = 1041 called_method.dex_file->GetMethodShorty( 1042 called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len); 1043 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); 1044 visitor.VisitArguments(); 1045 self->EndAssertNoThreadSuspension(old_cause); 1046 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface; 1047 // Resolve method filling in dex cache. 1048 if (!called_method_known_on_entry) { 1049 StackHandleScope<1> hs(self); 1050 mirror::Object* dummy = nullptr; 1051 HandleWrapper<mirror::Object> h_receiver( 1052 hs.NewHandleWrapper(virtual_or_interface ? 
&receiver : &dummy)); 1053 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1054 called = linker->ResolveMethod<ClassLinker::kForceICCECheck>( 1055 self, called_method.dex_method_index, caller, invoke_type); 1056 } 1057 const void* code = nullptr; 1058 if (LIKELY(!self->IsExceptionPending())) { 1059 // Incompatible class change should have been handled in resolve method. 1060 CHECK(!called->CheckIncompatibleClassChange(invoke_type)) 1061 << called->PrettyMethod() << " " << invoke_type; 1062 if (virtual_or_interface || invoke_type == kSuper) { 1063 // Refine called method based on receiver for kVirtual/kInterface, and 1064 // caller for kSuper. 1065 ArtMethod* orig_called = called; 1066 if (invoke_type == kVirtual) { 1067 CHECK(receiver != nullptr) << invoke_type; 1068 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize); 1069 } else if (invoke_type == kInterface) { 1070 CHECK(receiver != nullptr) << invoke_type; 1071 called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize); 1072 } else { 1073 DCHECK_EQ(invoke_type, kSuper); 1074 CHECK(caller != nullptr) << invoke_type; 1075 StackHandleScope<2> hs(self); 1076 Handle<mirror::DexCache> dex_cache( 1077 hs.NewHandle(caller->GetDeclaringClass()->GetDexCache())); 1078 Handle<mirror::ClassLoader> class_loader( 1079 hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader())); 1080 // TODO Maybe put this into a mirror::Class function. 1081 mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod( 1082 called_method.dex_method_index, dex_cache, class_loader); 1083 if (ref_class->IsInterface()) { 1084 called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize); 1085 } else { 1086 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry( 1087 called->GetMethodIndex(), kRuntimePointerSize); 1088 } 1089 } 1090 1091 CHECK(called != nullptr) << orig_called->PrettyMethod() << " " 1092 << mirror::Object::PrettyTypeOf(receiver) << " " 1093 << invoke_type << " " << orig_called->GetVtableIndex(); 1094 1095 // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index 1096 // of the sharpened method avoiding dirtying the dex cache if possible. 1097 // Note, called_method.dex_method_index references the dex method before the 1098 // FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile that only cares 1099 // about the name and signature. 1100 uint32_t update_dex_cache_method_index = called->GetDexMethodIndex(); 1101 if (!called->HasSameDexCacheResolvedMethods(caller, kRuntimePointerSize)) { 1102 // Calling from one dex file to another, need to compute the method index appropriate to 1103 // the caller's dex file. Since we get here only if the original called was a runtime 1104 // method, we've got the correct dex_file and a dex_method_idx from above. 
1105 DCHECK(!called_method_known_on_entry); 1106 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1107 const DexFile* caller_dex_file = called_method.dex_file; 1108 uint32_t caller_method_name_and_sig_index = called_method.dex_method_index; 1109 update_dex_cache_method_index = 1110 called->FindDexMethodIndexInOtherDexFile(*caller_dex_file, 1111 caller_method_name_and_sig_index); 1112 } 1113 if ((update_dex_cache_method_index != DexFile::kDexNoIndex) && 1114 (caller->GetDexCacheResolvedMethod( 1115 update_dex_cache_method_index, kRuntimePointerSize) != called)) { 1116 caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, 1117 called, 1118 kRuntimePointerSize); 1119 } 1120 } else if (invoke_type == kStatic) { 1121 const auto called_dex_method_idx = called->GetDexMethodIndex(); 1122 // For static invokes, we may dispatch to the static method in the superclass but resolve 1123 // using the subclass. To prevent getting slow paths on each invoke, we force set the 1124 // resolved method for the super class dex method index if we are in the same dex file. 1125 // b/19175856 1126 if (called->GetDexFile() == called_method.dex_file && 1127 called_method.dex_method_index != called_dex_method_idx) { 1128 called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, 1129 called, 1130 kRuntimePointerSize); 1131 } 1132 } 1133 1134 // Ensure that the called method's class is initialized. 1135 StackHandleScope<1> hs(soa.Self()); 1136 Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass())); 1137 linker->EnsureInitialized(soa.Self(), called_class, true, true); 1138 if (LIKELY(called_class->IsInitialized())) { 1139 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1140 // If we are single-stepping or the called method is deoptimized (by a 1141 // breakpoint, for example), then we have to execute the called method 1142 // with the interpreter. 1143 code = GetQuickToInterpreterBridge(); 1144 } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) { 1145 // If the caller is deoptimized (by a breakpoint, for example), we have to 1146 // continue its execution with interpreter when returning from the called 1147 // method. Because we do not want to execute the called method with the 1148 // interpreter, we wrap its execution into the instrumentation stubs. 1149 // When the called method returns, it will execute the instrumentation 1150 // exit hook that will determine the need of the interpreter with a call 1151 // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if 1152 // it is needed. 1153 code = GetQuickInstrumentationEntryPoint(); 1154 } else { 1155 code = called->GetEntryPointFromQuickCompiledCode(); 1156 } 1157 } else if (called_class->IsInitializing()) { 1158 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1159 // If we are single-stepping or the called method is deoptimized (by a 1160 // breakpoint, for example), then we have to execute the called method 1161 // with the interpreter. 1162 code = GetQuickToInterpreterBridge(); 1163 } else if (invoke_type == kStatic) { 1164 // Class is still initializing, go to oat and grab code (trampoline must be left in place 1165 // until class is initialized to stop races between threads). 1166 code = linker->GetQuickOatCodeFor(called); 1167 } else { 1168 // No trampoline for non-static methods. 
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == nullptr, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place the called method in the callee-save frame so it becomes the first argument to the
  // quick method.
  *sp = called;

  return code;
}

/*
 * This class uses a few observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) The number of registers used for passing is normally even, so counting down has no penalty
 *    for possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. It also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float architecture, Arm, is 32b, so no widening needs to be taken into
 *    account for floats and we can use the Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyway, so no padding code is
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into the LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) All supported architectures are little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR.
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. Only called when we need
 *                       padding, that is, when the architecture is 32b and aligns 64b values.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b targets; it's the callee's
 *                          job to split the value if necessary. The current state will already
 *                          have been aligned, if necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
 *                                            called with nullptr, as this might be important for
 *                                            null initialization. Must return the jobject, that
 *                                            is, the reference to the entry in the HandleScope
 *                                            (nullptr if necessary).
 *
 */
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNativeSoftFloatAbi = true;  // Treated as soft-float: FP arguments are
                                                     // marshalled through GPRs here.
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = true;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__mips__) && defined(__LP64__)
  // Let the code prepare GPRs only; the FPRs will be loaded with the same data.
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 8;
  static constexpr size_t kNumNativeFprArgs = 0;

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using integer registers for FP.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs; everything goes
                                                  // on the stack.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 is not using registers anyway.
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
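  // Illustrative sketch (not part of the original source) of the delegate contract described in
  // the class comment above. The name CountingDelegate and its containers are hypothetical; a
  // delegate only needs to provide these five methods for BuildNativeCallFrameStateMachine<T>:
  //
  //   class CountingDelegate {
  //    public:
  //     void PushGpr(uintptr_t val) { gprs_.push_back(val); }
  //     void PushFpr4(float val) { fprs_.push_back(bit_cast<uint32_t, float>(val)); }
  //     void PushFpr8(uint64_t val) { wide_fprs_.push_back(val); }
  //     void PushStack(uintptr_t val) { stack_.push_back(val); }
  //     uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
  //       // A real delegate records the reference in a HandleScope and returns the address of
  //       // that entry (or 0 for a null reference); this placeholder just returns the pointer.
  //       return reinterpret_cast<uintptr_t>(ref);
  //     }
  //    private:
  //     std::vector<uintptr_t> gprs_, stack_;
  //     std::vector<uint32_t> fprs_;
  //     std::vector<uint64_t> wide_fprs_;
  //   };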
1293 1294 static constexpr size_t kRegistersNeededForLong = 1; 1295 static constexpr size_t kRegistersNeededForDouble = 1; 1296 static constexpr bool kMultiRegistersAligned = false; 1297 static constexpr bool kMultiFPRegistersWidened = false; 1298 static constexpr bool kMultiGPRegistersWidened = false; 1299 static constexpr bool kAlignLongOnStack = false; 1300 static constexpr bool kAlignDoubleOnStack = false; 1301#else 1302#error "Unsupported architecture" 1303#endif 1304 1305 public: 1306 explicit BuildNativeCallFrameStateMachine(T* delegate) 1307 : gpr_index_(kNumNativeGprArgs), 1308 fpr_index_(kNumNativeFprArgs), 1309 stack_entries_(0), 1310 delegate_(delegate) { 1311 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff 1312 // the next register is even; counting down is just to make the compiler happy... 1313 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even"); 1314 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even"); 1315 } 1316 1317 virtual ~BuildNativeCallFrameStateMachine() {} 1318 1319 bool HavePointerGpr() const { 1320 return gpr_index_ > 0; 1321 } 1322 1323 void AdvancePointer(const void* val) { 1324 if (HavePointerGpr()) { 1325 gpr_index_--; 1326 PushGpr(reinterpret_cast<uintptr_t>(val)); 1327 } else { 1328 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b 1329 PushStack(reinterpret_cast<uintptr_t>(val)); 1330 gpr_index_ = 0; 1331 } 1332 } 1333 1334 bool HaveHandleScopeGpr() const { 1335 return gpr_index_ > 0; 1336 } 1337 1338 void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) { 1339 uintptr_t handle = PushHandle(ptr); 1340 if (HaveHandleScopeGpr()) { 1341 gpr_index_--; 1342 PushGpr(handle); 1343 } else { 1344 stack_entries_++; 1345 PushStack(handle); 1346 gpr_index_ = 0; 1347 } 1348 } 1349 1350 bool HaveIntGpr() const { 1351 return gpr_index_ > 0; 1352 } 1353 1354 void AdvanceInt(uint32_t val) { 1355 if (HaveIntGpr()) { 1356 gpr_index_--; 1357 if (kMultiGPRegistersWidened) { 1358 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1359 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1360 } else { 1361 PushGpr(val); 1362 } 1363 } else { 1364 stack_entries_++; 1365 if (kMultiGPRegistersWidened) { 1366 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1367 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1368 } else { 1369 PushStack(val); 1370 } 1371 gpr_index_ = 0; 1372 } 1373 } 1374 1375 bool HaveLongGpr() const { 1376 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 
1 : 0); 1377 } 1378 1379 bool LongGprNeedsPadding() const { 1380 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1381 kAlignLongOnStack && // and when it needs alignment 1382 (gpr_index_ & 1) == 1; // counter is odd, see constructor 1383 } 1384 1385 bool LongStackNeedsPadding() const { 1386 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1387 kAlignLongOnStack && // and when it needs 8B alignment 1388 (stack_entries_ & 1) == 1; // counter is odd 1389 } 1390 1391 void AdvanceLong(uint64_t val) { 1392 if (HaveLongGpr()) { 1393 if (LongGprNeedsPadding()) { 1394 PushGpr(0); 1395 gpr_index_--; 1396 } 1397 if (kRegistersNeededForLong == 1) { 1398 PushGpr(static_cast<uintptr_t>(val)); 1399 } else { 1400 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1401 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1402 } 1403 gpr_index_ -= kRegistersNeededForLong; 1404 } else { 1405 if (LongStackNeedsPadding()) { 1406 PushStack(0); 1407 stack_entries_++; 1408 } 1409 if (kRegistersNeededForLong == 1) { 1410 PushStack(static_cast<uintptr_t>(val)); 1411 stack_entries_++; 1412 } else { 1413 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1414 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1415 stack_entries_ += 2; 1416 } 1417 gpr_index_ = 0; 1418 } 1419 } 1420 1421 bool HaveFloatFpr() const { 1422 return fpr_index_ > 0; 1423 } 1424 1425 void AdvanceFloat(float val) { 1426 if (kNativeSoftFloatAbi) { 1427 AdvanceInt(bit_cast<uint32_t, float>(val)); 1428 } else { 1429 if (HaveFloatFpr()) { 1430 fpr_index_--; 1431 if (kRegistersNeededForDouble == 1) { 1432 if (kMultiFPRegistersWidened) { 1433 PushFpr8(bit_cast<uint64_t, double>(val)); 1434 } else { 1435 // No widening, just use the bits. 1436 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val))); 1437 } 1438 } else { 1439 PushFpr4(val); 1440 } 1441 } else { 1442 stack_entries_++; 1443 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) { 1444 // Need to widen before storing: Note the "double" in the template instantiation. 1445 // Note: We need to jump through those hoops to make the compiler happy. 1446 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t)); 1447 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val))); 1448 } else { 1449 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val))); 1450 } 1451 fpr_index_ = 0; 1452 } 1453 } 1454 } 1455 1456 bool HaveDoubleFpr() const { 1457 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 
1 : 0); 1458 } 1459 1460 bool DoubleFprNeedsPadding() const { 1461 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1462 kAlignDoubleOnStack && // and when it needs alignment 1463 (fpr_index_ & 1) == 1; // counter is odd, see constructor 1464 } 1465 1466 bool DoubleStackNeedsPadding() const { 1467 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1468 kAlignDoubleOnStack && // and when it needs 8B alignment 1469 (stack_entries_ & 1) == 1; // counter is odd 1470 } 1471 1472 void AdvanceDouble(uint64_t val) { 1473 if (kNativeSoftFloatAbi) { 1474 AdvanceLong(val); 1475 } else { 1476 if (HaveDoubleFpr()) { 1477 if (DoubleFprNeedsPadding()) { 1478 PushFpr4(0); 1479 fpr_index_--; 1480 } 1481 PushFpr8(val); 1482 fpr_index_ -= kRegistersNeededForDouble; 1483 } else { 1484 if (DoubleStackNeedsPadding()) { 1485 PushStack(0); 1486 stack_entries_++; 1487 } 1488 if (kRegistersNeededForDouble == 1) { 1489 PushStack(static_cast<uintptr_t>(val)); 1490 stack_entries_++; 1491 } else { 1492 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1493 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1494 stack_entries_ += 2; 1495 } 1496 fpr_index_ = 0; 1497 } 1498 } 1499 } 1500 1501 uint32_t GetStackEntries() const { 1502 return stack_entries_; 1503 } 1504 1505 uint32_t GetNumberOfUsedGprs() const { 1506 return kNumNativeGprArgs - gpr_index_; 1507 } 1508 1509 uint32_t GetNumberOfUsedFprs() const { 1510 return kNumNativeFprArgs - fpr_index_; 1511 } 1512 1513 private: 1514 void PushGpr(uintptr_t val) { 1515 delegate_->PushGpr(val); 1516 } 1517 void PushFpr4(float val) { 1518 delegate_->PushFpr4(val); 1519 } 1520 void PushFpr8(uint64_t val) { 1521 delegate_->PushFpr8(val); 1522 } 1523 void PushStack(uintptr_t val) { 1524 delegate_->PushStack(val); 1525 } 1526 uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) { 1527 return delegate_->PushHandle(ref); 1528 } 1529 1530 uint32_t gpr_index_; // Number of free GPRs 1531 uint32_t fpr_index_; // Number of free FPRs 1532 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not 1533 // extended 1534 T* const delegate_; // What Push implementation gets called 1535}; 1536 1537// Computes the sizes of register stacks and call stack area. Handling of references can be extended 1538// in subclasses. 1539// 1540// To handle native pointers, use "L" in the shorty for an object reference, which simulates 1541// them with handles. 1542class ComputeNativeCallFrameSize { 1543 public: 1544 ComputeNativeCallFrameSize() : num_stack_entries_(0) {} 1545 1546 virtual ~ComputeNativeCallFrameSize() {} 1547 1548 uint32_t GetStackSize() const { 1549 return num_stack_entries_ * sizeof(uintptr_t); 1550 } 1551 1552 uint8_t* LayoutCallStack(uint8_t* sp8) const { 1553 sp8 -= GetStackSize(); 1554 // Align by kStackAlignment. 
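  // (kStackAlignment is 16 bytes on the architectures ART currently supports.)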
1555 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1556 return sp8; 1557 } 1558 1559 uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) 1560 const { 1561 // Assumption is OK right now, as we have soft-float arm 1562 size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs; 1563 sp8 -= fregs * sizeof(uintptr_t); 1564 *start_fpr = reinterpret_cast<uint32_t*>(sp8); 1565 size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs; 1566 sp8 -= iregs * sizeof(uintptr_t); 1567 *start_gpr = reinterpret_cast<uintptr_t*>(sp8); 1568 return sp8; 1569 } 1570 1571 uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr, 1572 uint32_t** start_fpr) const { 1573 // Native call stack. 1574 sp8 = LayoutCallStack(sp8); 1575 *start_stack = reinterpret_cast<uintptr_t*>(sp8); 1576 1577 // Put fprs and gprs below. 1578 sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr); 1579 1580 // Return the new bottom. 1581 return sp8; 1582 } 1583 1584 virtual void WalkHeader( 1585 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED) 1586 REQUIRES_SHARED(Locks::mutator_lock_) { 1587 } 1588 1589 void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) { 1590 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this); 1591 1592 WalkHeader(&sm); 1593 1594 for (uint32_t i = 1; i < shorty_len; ++i) { 1595 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]); 1596 switch (cur_type_) { 1597 case Primitive::kPrimNot: 1598 // TODO: fix abuse of mirror types. 1599 sm.AdvanceHandleScope( 1600 reinterpret_cast<mirror::Object*>(0x12345678)); 1601 break; 1602 1603 case Primitive::kPrimBoolean: 1604 case Primitive::kPrimByte: 1605 case Primitive::kPrimChar: 1606 case Primitive::kPrimShort: 1607 case Primitive::kPrimInt: 1608 sm.AdvanceInt(0); 1609 break; 1610 case Primitive::kPrimFloat: 1611 sm.AdvanceFloat(0); 1612 break; 1613 case Primitive::kPrimDouble: 1614 sm.AdvanceDouble(0); 1615 break; 1616 case Primitive::kPrimLong: 1617 sm.AdvanceLong(0); 1618 break; 1619 default: 1620 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty; 1621 UNREACHABLE(); 1622 } 1623 } 1624 1625 num_stack_entries_ = sm.GetStackEntries(); 1626 } 1627 1628 void PushGpr(uintptr_t /* val */) { 1629 // not optimizing registers, yet 1630 } 1631 1632 void PushFpr4(float /* val */) { 1633 // not optimizing registers, yet 1634 } 1635 1636 void PushFpr8(uint64_t /* val */) { 1637 // not optimizing registers, yet 1638 } 1639 1640 void PushStack(uintptr_t /* val */) { 1641 // counting is already done in the superclass 1642 } 1643 1644 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) { 1645 return reinterpret_cast<uintptr_t>(nullptr); 1646 } 1647 1648 protected: 1649 uint32_t num_stack_entries_; 1650}; 1651 1652class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { 1653 public: 1654 explicit ComputeGenericJniFrameSize(bool critical_native) 1655 : num_handle_scope_references_(0), critical_native_(critical_native) {} 1656 1657 // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs 1658 // is at *m = sp. Will update to point to the bottom of the save frame. 1659 // 1660 // Note: assumes ComputeAll() has been run before. 
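  // Resulting layout, from higher to lower addresses (sketch for illustration only):
  //   | RefsAndArgs callee-save frame, minus its old Method* slot     |
  //   | HandleScope with num_handle_scope_references_ entries         |
  //   | ArtMethod* (the relocated method slot; *m points here)        | <- aligned to kStackAlignment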
1661 void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1662 REQUIRES_SHARED(Locks::mutator_lock_) { 1663 ArtMethod* method = **m; 1664 1665 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 1666 1667 uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp); 1668 1669 // First, fix up the layout of the callee-save frame. 1670 // We have to squeeze in the HandleScope, and relocate the method pointer. 1671 1672 // "Free" the slot for the method. 1673 sp8 += sizeof(void*); // In the callee-save frame we use a full pointer. 1674 1675 // Under the callee saves put handle scope and new method stack reference. 1676 size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_); 1677 size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*); 1678 1679 sp8 -= scope_and_method; 1680 // Align by kStackAlignment. 1681 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1682 1683 uint8_t* sp8_table = sp8 + sizeof(ArtMethod*); 1684 *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(), 1685 num_handle_scope_references_); 1686 1687 // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us. 1688 uint8_t* method_pointer = sp8; 1689 auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer); 1690 *new_method_ref = method; 1691 *m = new_method_ref; 1692 } 1693 1694 // Adds space for the cookie. Note: may leave stack unaligned. 1695 void LayoutCookie(uint8_t** sp) const { 1696 // Reference cookie and padding 1697 *sp -= 8; 1698 } 1699 1700 // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. 1701 // Returns the new bottom. Note: this may be unaligned. 1702 uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1703 REQUIRES_SHARED(Locks::mutator_lock_) { 1704 // First, fix up the layout of the callee-save frame. 1705 // We have to squeeze in the HandleScope, and relocate the method pointer. 1706 LayoutCalleeSaveFrame(self, m, sp, handle_scope); 1707 1708 // The bottom of the callee-save frame is now where the method is, *m. 1709 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m); 1710 1711 // Add space for cookie. 1712 LayoutCookie(&sp8); 1713 1714 return sp8; 1715 } 1716 1717 // WARNING: After this, *sp won't be pointing to the method anymore! 1718 uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, 1719 HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, 1720 uint32_t** start_fpr) 1721 REQUIRES_SHARED(Locks::mutator_lock_) { 1722 Walk(shorty, shorty_len); 1723 1724 // JNI part. 1725 uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope); 1726 1727 sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr); 1728 1729 // Return the new bottom. 1730 return sp8; 1731 } 1732 1733 uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE; 1734 1735 // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 
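  // For example (hypothetical shorty, for illustration only): a non-static, non-@CriticalNative
  // method with shorty "ILF" walks JNIEnv*, jobject ("this", or jclass for a static method), then
  // the shorty-derived arguments (a reference and a float). For @CriticalNative methods WalkHeader
  // adds nothing, so only the shorty-derived arguments are counted.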
1736 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE 1737 REQUIRES_SHARED(Locks::mutator_lock_); 1738 1739 private: 1740 uint32_t num_handle_scope_references_; 1741 const bool critical_native_; 1742}; 1743 1744uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) { 1745 num_handle_scope_references_++; 1746 return reinterpret_cast<uintptr_t>(nullptr); 1747} 1748 1749void ComputeGenericJniFrameSize::WalkHeader( 1750 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) { 1751 // First 2 parameters are always excluded for @CriticalNative. 1752 if (UNLIKELY(critical_native_)) { 1753 return; 1754 } 1755 1756 // JNIEnv 1757 sm->AdvancePointer(nullptr); 1758 1759 // Class object or this as first argument 1760 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678)); 1761} 1762 1763// Class to push values to three separate regions. Used to fill the native call part. Adheres to 1764// the template requirements of BuildGenericJniFrameStateMachine. 1765class FillNativeCall { 1766 public: 1767 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) : 1768 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {} 1769 1770 virtual ~FillNativeCall() {} 1771 1772 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) { 1773 cur_gpr_reg_ = gpr_regs; 1774 cur_fpr_reg_ = fpr_regs; 1775 cur_stack_arg_ = stack_args; 1776 } 1777 1778 void PushGpr(uintptr_t val) { 1779 *cur_gpr_reg_ = val; 1780 cur_gpr_reg_++; 1781 } 1782 1783 void PushFpr4(float val) { 1784 *cur_fpr_reg_ = val; 1785 cur_fpr_reg_++; 1786 } 1787 1788 void PushFpr8(uint64_t val) { 1789 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_); 1790 *tmp = val; 1791 cur_fpr_reg_ += 2; 1792 } 1793 1794 void PushStack(uintptr_t val) { 1795 *cur_stack_arg_ = val; 1796 cur_stack_arg_++; 1797 } 1798 1799 virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) { 1800 LOG(FATAL) << "(Non-JNI) Native call does not use handles."; 1801 UNREACHABLE(); 1802 } 1803 1804 private: 1805 uintptr_t* cur_gpr_reg_; 1806 uint32_t* cur_fpr_reg_; 1807 uintptr_t* cur_stack_arg_; 1808}; 1809 1810// Visits arguments on the stack placing them into a region lower down the stack for the benefit 1811// of transitioning into native code. 1812class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { 1813 public: 1814 BuildGenericJniFrameVisitor(Thread* self, 1815 bool is_static, 1816 bool critical_native, 1817 const char* shorty, 1818 uint32_t shorty_len, 1819 ArtMethod*** sp) 1820 : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), 1821 jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native), 1822 sm_(&jni_call_) { 1823 ComputeGenericJniFrameSize fsc(critical_native); 1824 uintptr_t* start_gpr_reg; 1825 uint32_t* start_fpr_reg; 1826 uintptr_t* start_stack_arg; 1827 bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len, 1828 &handle_scope_, 1829 &start_stack_arg, 1830 &start_gpr_reg, &start_fpr_reg); 1831 1832 jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_); 1833 1834 // First 2 parameters are always excluded for CriticalNative methods. 1835 if (LIKELY(!critical_native)) { 1836 // jni environment is always first argument 1837 sm_.AdvancePointer(self->GetJniEnv()); 1838 1839 if (is_static) { 1840 sm_.AdvanceHandleScope((**sp)->GetDeclaringClass()); 1841 } // else "this" reference is already handled by QuickArgumentVisitor. 
1842 } 1843 } 1844 1845 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 1846 1847 void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); 1848 1849 StackReference<mirror::Object>* GetFirstHandleScopeEntry() { 1850 return handle_scope_->GetHandle(0).GetReference(); 1851 } 1852 1853 jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) { 1854 return handle_scope_->GetHandle(0).ToJObject(); 1855 } 1856 1857 void* GetBottomOfUsedArea() const { 1858 return bottom_of_used_area_; 1859 } 1860 1861 private: 1862 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall. 1863 class FillJniCall FINAL : public FillNativeCall { 1864 public: 1865 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, 1866 HandleScope* handle_scope, bool critical_native) 1867 : FillNativeCall(gpr_regs, fpr_regs, stack_args), 1868 handle_scope_(handle_scope), 1869 cur_entry_(0), 1870 critical_native_(critical_native) {} 1871 1872 uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); 1873 1874 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { 1875 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); 1876 handle_scope_ = scope; 1877 cur_entry_ = 0U; 1878 } 1879 1880 void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) { 1881 // Initialize padding entries. 1882 size_t expected_slots = handle_scope_->NumberOfReferences(); 1883 while (cur_entry_ < expected_slots) { 1884 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr); 1885 } 1886 1887 if (!critical_native_) { 1888 // Non-critical natives have at least the self class (jclass) or this (jobject). 1889 DCHECK_NE(cur_entry_, 0U); 1890 } 1891 } 1892 1893 bool CriticalNative() const { 1894 return critical_native_; 1895 } 1896 1897 private: 1898 HandleScope* handle_scope_; 1899 size_t cur_entry_; 1900 const bool critical_native_; 1901 }; 1902 1903 HandleScope* handle_scope_; 1904 FillJniCall jni_call_; 1905 void* bottom_of_used_area_; 1906 1907 BuildNativeCallFrameStateMachine<FillJniCall> sm_; 1908 1909 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor); 1910}; 1911 1912uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) { 1913 uintptr_t tmp; 1914 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_); 1915 h.Assign(ref); 1916 tmp = reinterpret_cast<uintptr_t>(h.ToJObject()); 1917 cur_entry_++; 1918 return tmp; 1919} 1920 1921void BuildGenericJniFrameVisitor::Visit() { 1922 Primitive::Type type = GetParamPrimitiveType(); 1923 switch (type) { 1924 case Primitive::kPrimLong: { 1925 jlong long_arg; 1926 if (IsSplitLongOrDouble()) { 1927 long_arg = ReadSplitLongParam(); 1928 } else { 1929 long_arg = *reinterpret_cast<jlong*>(GetParamAddress()); 1930 } 1931 sm_.AdvanceLong(long_arg); 1932 break; 1933 } 1934 case Primitive::kPrimDouble: { 1935 uint64_t double_arg; 1936 if (IsSplitLongOrDouble()) { 1937 // Read into union so that we don't case to a double. 
1938 double_arg = ReadSplitLongParam(); 1939 } else { 1940 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress()); 1941 } 1942 sm_.AdvanceDouble(double_arg); 1943 break; 1944 } 1945 case Primitive::kPrimNot: { 1946 StackReference<mirror::Object>* stack_ref = 1947 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 1948 sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr()); 1949 break; 1950 } 1951 case Primitive::kPrimFloat: 1952 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress())); 1953 break; 1954 case Primitive::kPrimBoolean: // Fall-through. 1955 case Primitive::kPrimByte: // Fall-through. 1956 case Primitive::kPrimChar: // Fall-through. 1957 case Primitive::kPrimShort: // Fall-through. 1958 case Primitive::kPrimInt: // Fall-through. 1959 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress())); 1960 break; 1961 case Primitive::kPrimVoid: 1962 LOG(FATAL) << "UNREACHABLE"; 1963 UNREACHABLE(); 1964 } 1965} 1966 1967void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) { 1968 // Clear out rest of the scope. 1969 jni_call_.ResetRemainingScopeSlots(); 1970 if (!jni_call_.CriticalNative()) { 1971 // Install HandleScope. 1972 self->PushHandleScope(handle_scope_); 1973 } 1974} 1975 1976#if defined(__arm__) || defined(__aarch64__) 1977extern "C" void* artFindNativeMethod(); 1978#else 1979extern "C" void* artFindNativeMethod(Thread* self); 1980#endif 1981 1982static uint64_t artQuickGenericJniEndJNIRef(Thread* self, 1983 uint32_t cookie, 1984 bool fast_native ATTRIBUTE_UNUSED, 1985 jobject l, 1986 jobject lock) { 1987 // TODO: add entrypoints for @FastNative returning objects. 1988 if (lock != nullptr) { 1989 return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self)); 1990 } else { 1991 return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self)); 1992 } 1993} 1994 1995static void artQuickGenericJniEndJNINonRef(Thread* self, 1996 uint32_t cookie, 1997 bool fast_native, 1998 jobject lock) { 1999 if (lock != nullptr) { 2000 JniMethodEndSynchronized(cookie, lock, self); 2001 // Ignore "fast_native" here because synchronized functions aren't very fast. 2002 } else { 2003 if (UNLIKELY(fast_native)) { 2004 JniMethodFastEnd(cookie, self); 2005 } else { 2006 JniMethodEnd(cookie, self); 2007 } 2008 } 2009} 2010 2011/* 2012 * Initializes an alloca region assumed to be directly below sp for a native call: 2013 * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers. 2014 * The final element on the stack is a pointer to the native code. 2015 * 2016 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it. 2017 * We need to fix this, as the handle scope needs to go into the callee-save frame. 2018 * 2019 * The return of this function denotes: 2020 * 1) How many bytes of the alloca can be released, if the value is non-negative. 2021 * 2) An error, if the value is negative. 2022 */ 2023extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp) 2024 REQUIRES_SHARED(Locks::mutator_lock_) { 2025 ArtMethod* called = *sp; 2026 DCHECK(called->IsNative()) << called->PrettyMethod(true); 2027 uint32_t shorty_len = 0; 2028 const char* shorty = called->GetShorty(&shorty_len); 2029 bool critical_native = called->IsAnnotatedWithCriticalNative(); 2030 bool fast_native = called->IsAnnotatedWithFastNative(); 2031 bool normal_native = !critical_native && !fast_native; 2032 2033 // Run the visitor and update sp. 
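  // (The visitor re-lays-out the frame: it inserts the HandleScope, relocates the method pointer,
  // and fills the native GPR/FPR/stack argument areas below the callee-save frame.)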
2034 BuildGenericJniFrameVisitor visitor(self, 2035 called->IsStatic(), 2036 critical_native, 2037 shorty, 2038 shorty_len, 2039 &sp); 2040 { 2041 ScopedAssertNoThreadSuspension sants(__FUNCTION__); 2042 visitor.VisitArguments(); 2043 // FinalizeHandleScope pushes the handle scope on the thread. 2044 visitor.FinalizeHandleScope(self); 2045 } 2046 2047 // Fix up managed-stack things in Thread. 2048 self->SetTopOfStack(sp); 2049 2050 self->VerifyStack(); 2051 2052 uint32_t cookie; 2053 uint32_t* sp32; 2054 // Skip calling JniMethodStart for @CriticalNative. 2055 if (LIKELY(!critical_native)) { 2056 // Start JNI, save the cookie. 2057 if (called->IsSynchronized()) { 2058 DCHECK(normal_native) << " @FastNative and synchronize is not supported"; 2059 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self); 2060 if (self->IsExceptionPending()) { 2061 self->PopHandleScope(); 2062 // A negative value denotes an error. 2063 return GetTwoWordFailureValue(); 2064 } 2065 } else { 2066 if (fast_native) { 2067 cookie = JniMethodFastStart(self); 2068 } else { 2069 DCHECK(normal_native); 2070 cookie = JniMethodStart(self); 2071 } 2072 } 2073 sp32 = reinterpret_cast<uint32_t*>(sp); 2074 *(sp32 - 1) = cookie; 2075 } 2076 2077 // Retrieve the stored native code. 2078 void* nativeCode = called->GetEntryPointFromJni(); 2079 2080 // There are two cases for the content of nativeCode: 2081 // 1) Pointer to the native function. 2082 // 2) Pointer to the trampoline for native code binding. 2083 // In the second case, we need to execute the binding and continue with the actual native function 2084 // pointer. 2085 DCHECK(nativeCode != nullptr); 2086 if (nativeCode == GetJniDlsymLookupStub()) { 2087#if defined(__arm__) || defined(__aarch64__) 2088 nativeCode = artFindNativeMethod(); 2089#else 2090 nativeCode = artFindNativeMethod(self); 2091#endif 2092 2093 if (nativeCode == nullptr) { 2094 DCHECK(self->IsExceptionPending()); // There should be an exception pending now. 2095 2096 // @CriticalNative calls do not need to call back into JniMethodEnd. 2097 if (LIKELY(!critical_native)) { 2098 // End JNI, as the assembly will move to deliver the exception. 2099 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr; 2100 if (shorty[0] == 'L') { 2101 artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock); 2102 } else { 2103 artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock); 2104 } 2105 } 2106 2107 return GetTwoWordFailureValue(); 2108 } 2109 // Note that the native code pointer will be automatically set by artFindNativeMethod(). 2110 } 2111 2112#if defined(__mips__) && !defined(__LP64__) 2113 // On MIPS32 if the first two arguments are floating-point, we need to know their types 2114 // so that art_quick_generic_jni_trampoline can correctly extract them from the stack 2115 // and load into floating-point registers. 
2116 // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU 2117 // view): 2118 // (1) 2119 // | DOUBLE | DOUBLE | other args, if any 2120 // | F12 | F13 | F14 | F15 | 2121 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2122 // (2) 2123 // | DOUBLE | FLOAT | (PAD) | other args, if any 2124 // | F12 | F13 | F14 | | 2125 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2126 // (3) 2127 // | FLOAT | (PAD) | DOUBLE | other args, if any 2128 // | F12 | | F14 | F15 | 2129 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2130 // (4) 2131 // | FLOAT | FLOAT | other args, if any 2132 // | F12 | F14 | 2133 // | SP+0 | SP+4 | SP+8 2134 // As you can see, only the last case (4) is special. In all others we can just 2135 // load F12/F13 and F14/F15 in the same manner. 2136 // Set bit 0 of the native code address to 1 in this case (valid code addresses 2137 // are always a multiple of 4 on MIPS32, so we have 2 spare bits available). 2138 if (nativeCode != nullptr && 2139 shorty != nullptr && 2140 shorty_len >= 3 && 2141 shorty[1] == 'F' && 2142 shorty[2] == 'F') { 2143 nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1); 2144 } 2145#endif 2146 2147 // Return native code addr(lo) and bottom of alloca address(hi). 2148 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()), 2149 reinterpret_cast<uintptr_t>(nativeCode)); 2150} 2151 2152// Defined in quick_jni_entrypoints.cc. 2153extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie, 2154 jvalue result, uint64_t result_f, ArtMethod* called, 2155 HandleScope* handle_scope); 2156/* 2157 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and 2158 * unlocking. 2159 */ 2160extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, 2161 jvalue result, 2162 uint64_t result_f) { 2163 // We're here just back from a native call. We don't have the shared mutator lock at this point 2164 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing 2165 // anything that requires a mutator lock before that would cause problems as GC may have the 2166 // exclusive mutator lock and may be moving objects, etc. 2167 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); 2168 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 2169 ArtMethod* called = *sp; 2170 uint32_t cookie = *(sp32 - 1); 2171 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp)); 2172 return GenericJniMethodEnd(self, cookie, result, result_f, called, table); 2173} 2174 2175// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value 2176// for the method pointer. 2177// 2178// It is valid to use this, as at the usage points here (returns from C functions) we are assuming 2179// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations). 
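//
// Illustrative sketch of that convention (pseudo-C++; the real consumers are the assembly invoke
// stubs, and the helper names lo_word/hi_word are hypothetical):
//
//   TwoWordReturn r = artInvokeVirtualTrampolineWithAccessCheck(method_idx, receiver, self, sp);
//   if (/* r is the failure value, see GetTwoWordFailureValue() */) {
//     // An exception is pending on |self|; the stub branches to the exception-delivery path.
//   } else {
//     ArtMethod* method = reinterpret_cast<ArtMethod*>(lo_word(r));   // lo: the resolved method
//     const void* code = reinterpret_cast<const void*>(hi_word(r));   // hi: its entry point
//     // The stub materializes |method| in the hidden-argument register and tail-calls |code|.
//   }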
2180 2181template<InvokeType type, bool access_check> 2182static TwoWordReturn artInvokeCommon(uint32_t method_idx, 2183 ObjPtr<mirror::Object> this_object, 2184 Thread* self, 2185 ArtMethod** sp) { 2186 ScopedQuickEntrypointChecks sqec(self); 2187 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs)); 2188 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2189 ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); 2190 if (UNLIKELY(method == nullptr)) { 2191 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); 2192 uint32_t shorty_len; 2193 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len); 2194 { 2195 // Remember the args in case a GC happens in FindMethodFromCode. 2196 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2197 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa); 2198 visitor.VisitArguments(); 2199 method = FindMethodFromCode<type, access_check>(method_idx, 2200 &this_object, 2201 caller_method, 2202 self); 2203 visitor.FixupReferences(); 2204 } 2205 2206 if (UNLIKELY(method == nullptr)) { 2207 CHECK(self->IsExceptionPending()); 2208 return GetTwoWordFailureValue(); // Failure. 2209 } 2210 } 2211 DCHECK(!self->IsExceptionPending()); 2212 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2213 2214 // When we return, the caller will branch to this address, so it had better not be 0! 2215 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2216 << " location: " 2217 << method->GetDexFile()->GetLocation(); 2218 2219 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2220 reinterpret_cast<uintptr_t>(method)); 2221} 2222 2223// Explicit artInvokeCommon template function declarations to please analysis tool. 
2224#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \ 2225 template REQUIRES_SHARED(Locks::mutator_lock_) \ 2226 TwoWordReturn artInvokeCommon<type, access_check>( \ 2227 uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp) 2228 2229EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false); 2230EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true); 2231EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false); 2232EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true); 2233EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false); 2234EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true); 2235EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false); 2236EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true); 2237EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false); 2238EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); 2239#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL 2240 2241// See comments in runtime_support_asm.S 2242extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( 2243 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2244 REQUIRES_SHARED(Locks::mutator_lock_) { 2245 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp); 2246} 2247 2248extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( 2249 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2250 REQUIRES_SHARED(Locks::mutator_lock_) { 2251 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp); 2252} 2253 2254extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( 2255 uint32_t method_idx, 2256 mirror::Object* this_object ATTRIBUTE_UNUSED, 2257 Thread* self, 2258 ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 2259 // For static calls, this_object is not required and may be random garbage. Don't pass it down, 2260 // so that it does not trip the ObjPtr alignment check. 2261 return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp); 2262} 2263 2264extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( 2265 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2266 REQUIRES_SHARED(Locks::mutator_lock_) { 2267 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp); 2268} 2269 2270extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( 2271 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2272 REQUIRES_SHARED(Locks::mutator_lock_) { 2273 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp); 2274} 2275 2276// Determine the target of an interface dispatch. The "this" object is known to be non-null. The 2277// first argument is there for consistency but should not be used, as some architectures overwrite 2278// it in the assembly trampoline. 2279extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED, 2280 mirror::Object* raw_this_object, 2281 Thread* self, 2282 ArtMethod** sp) 2283 REQUIRES_SHARED(Locks::mutator_lock_) { 2284 ObjPtr<mirror::Object> this_object(raw_this_object); 2285 ScopedQuickEntrypointChecks sqec(self); 2286 StackHandleScope<1> hs(self); 2287 Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass())); 2288 2289 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2290 2291 // Fetch the dex_method_idx of the target interface method from the caller.
2292 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2293 2294 const DexFile::CodeItem* code_item = caller_method->GetCodeItem(); 2295 CHECK_LT(dex_pc, code_item->insns_size_in_code_units_); 2296 const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]); 2297 Instruction::Code instr_code = instr->Opcode(); 2298 CHECK(instr_code == Instruction::INVOKE_INTERFACE || 2299 instr_code == Instruction::INVOKE_INTERFACE_RANGE) 2300 << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr); 2301 uint32_t dex_method_idx; 2302 if (instr_code == Instruction::INVOKE_INTERFACE) { 2303 dex_method_idx = instr->VRegB_35c(); 2304 } else { 2305 CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); 2306 dex_method_idx = instr->VRegB_3rc(); 2307 } 2308 2309 ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod( 2310 dex_method_idx, kRuntimePointerSize); 2311 DCHECK(interface_method != nullptr) << dex_method_idx << " " << caller_method->PrettyMethod(); 2312 ArtMethod* method = nullptr; 2313 ImTable* imt = cls->GetImt(kRuntimePointerSize); 2314 2315 if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) { 2316 // If the dex cache already resolved the interface method, look whether we have 2317 // a match in the ImtConflictTable. 2318 ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method), 2319 kRuntimePointerSize); 2320 if (LIKELY(conflict_method->IsRuntimeMethod())) { 2321 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize); 2322 DCHECK(current_table != nullptr); 2323 method = current_table->Lookup(interface_method, kRuntimePointerSize); 2324 } else { 2325 // It seems we aren't really a conflict method! 2326 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2327 } 2328 if (method != nullptr) { 2329 return GetTwoWordSuccessValue( 2330 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()), 2331 reinterpret_cast<uintptr_t>(method)); 2332 } 2333 2334 // No match, use the IfTable. 2335 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2336 if (UNLIKELY(method == nullptr)) { 2337 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch( 2338 interface_method, this_object, caller_method); 2339 return GetTwoWordFailureValue(); // Failure. 2340 } 2341 } else { 2342 // The dex cache did not resolve the method, look it up in the dex file 2343 // of the caller, 2344 DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod()); 2345 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache() 2346 ->GetDexFile(); 2347 uint32_t shorty_len; 2348 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), 2349 &shorty_len); 2350 { 2351 // Remember the args in case a GC happens in FindMethodFromCode. 2352 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2353 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa); 2354 visitor.VisitArguments(); 2355 method = FindMethodFromCode<kInterface, false>(dex_method_idx, 2356 &this_object, 2357 caller_method, 2358 self); 2359 visitor.FixupReferences(); 2360 } 2361 2362 if (UNLIKELY(method == nullptr)) { 2363 CHECK(self->IsExceptionPending()); 2364 return GetTwoWordFailureValue(); // Failure. 
2365 } 2366 interface_method = 2367 caller_method->GetDexCacheResolvedMethod(dex_method_idx, kRuntimePointerSize); 2368 DCHECK(!interface_method->IsRuntimeMethod()); 2369 } 2370 2371 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable. 2372 // We create a new table with the new pair { interface_method, method }. 2373 uint32_t imt_index = ImTable::GetImtIndex(interface_method); 2374 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize); 2375 if (conflict_method->IsRuntimeMethod()) { 2376 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( 2377 cls.Get(), 2378 conflict_method, 2379 interface_method, 2380 method, 2381 /*force_new_conflict_method*/false); 2382 if (new_conflict_method != conflict_method) { 2383 // Update the IMT if we create a new conflict method. No fence needed here, as the 2384 // data is consistent. 2385 imt->Set(imt_index, 2386 new_conflict_method, 2387 kRuntimePointerSize); 2388 } 2389 } 2390 2391 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2392 2393 // When we return, the caller will branch to this address, so it had better not be 0! 2394 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2395 << " location: " << method->GetDexFile()->GetLocation(); 2396 2397 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2398 reinterpret_cast<uintptr_t>(method)); 2399} 2400 2401// Returns shorty type so the caller can determine how to put |result| 2402// into expected registers. The shorty type is static so the compiler 2403// could call different flavors of this code path depending on the 2404// shorty type though this would require different entry points for 2405// each type. 2406extern "C" uintptr_t artInvokePolymorphic( 2407 JValue* result, 2408 mirror::Object* raw_method_handle, 2409 Thread* self, 2410 ArtMethod** sp) 2411 REQUIRES_SHARED(Locks::mutator_lock_) { 2412 ScopedQuickEntrypointChecks sqec(self); 2413 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs)); 2414 2415 // Start new JNI local reference state 2416 JNIEnvExt* env = self->GetJniEnv(); 2417 ScopedObjectAccessUnchecked soa(env); 2418 ScopedJniEnvLocalRefState env_state(env); 2419 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe."); 2420 2421 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC. 2422 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2423 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2424 const DexFile::CodeItem* code = caller_method->GetCodeItem(); 2425 const Instruction* inst = Instruction::At(&code->insns_[dex_pc]); 2426 DCHECK(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC || 2427 inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2428 const DexFile* dex_file = caller_method->GetDexFile(); 2429 const uint32_t proto_idx = inst->VRegH(); 2430 const char* shorty = dex_file->GetShorty(proto_idx); 2431 const size_t shorty_length = strlen(shorty); 2432 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static. 2433 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa); 2434 gc_visitor.VisitArguments(); 2435 2436 // Wrap raw_method_handle in a Handle for safety. 
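  // (A Handle keeps the reference visible to, and updated by, the GC; the raw pointer could be
  // left dangling if a collection moved the object during the resolutions below.)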
2437 StackHandleScope<5> hs(self); 2438 Handle<mirror::MethodHandleImpl> method_handle( 2439 hs.NewHandle(ObjPtr<mirror::MethodHandleImpl>::DownCast(MakeObjPtr(raw_method_handle)))); 2440 raw_method_handle = nullptr; 2441 self->EndAssertNoThreadSuspension(old_cause); 2442 2443 // Resolve method - it's either MethodHandle.invoke() or MethodHandle.invokeExact(). 2444 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 2445 ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::kForceICCECheck>(self, 2446 inst->VRegB(), 2447 caller_method, 2448 kVirtual); 2449 DCHECK((resolved_method == 2450 jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) || 2451 (resolved_method == 2452 jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke))); 2453 if (UNLIKELY(method_handle.IsNull())) { 2454 ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual); 2455 return static_cast<uintptr_t>('V'); 2456 } 2457 2458 Handle<mirror::Class> caller_class(hs.NewHandle(caller_method->GetDeclaringClass())); 2459 Handle<mirror::MethodType> method_type(hs.NewHandle(linker->ResolveMethodType( 2460 *dex_file, proto_idx, 2461 hs.NewHandle<mirror::DexCache>(caller_class->GetDexCache()), 2462 hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader())))); 2463 // This implies we couldn't resolve one or more types in this method handle. 2464 if (UNLIKELY(method_type.IsNull())) { 2465 CHECK(self->IsExceptionPending()); 2466 return static_cast<uintptr_t>('V'); 2467 } 2468 2469 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst->VRegA()); 2470 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic); 2471 2472 // Fix references before constructing the shadow frame. 2473 gc_visitor.FixupReferences(); 2474 2475 // Construct shadow frame placing arguments consecutively from |first_arg|. 2476 const bool is_range = (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2477 const size_t num_vregs = is_range ? inst->VRegA_4rcc() : inst->VRegA_45cc(); 2478 const size_t first_arg = 0; 2479 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 2480 CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc); 2481 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 2482 ScopedStackedShadowFramePusher 2483 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction); 2484 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, 2485 kMethodIsStatic, 2486 shorty, 2487 strlen(shorty), 2488 shadow_frame, 2489 first_arg); 2490 shadow_frame_builder.VisitArguments(); 2491 2492 // Push a transition back into managed code onto the linked list in thread. 2493 ManagedStack fragment; 2494 self->PushManagedStackFragment(&fragment); 2495 2496 // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in 2497 // consecutive order. 2498 uint32_t unused_args[Instruction::kMaxVarArgRegs] = {}; 2499 uint32_t first_callee_arg = first_arg + 1; 2500 const bool do_assignability_check = false; 2501 if (!DoInvokePolymorphic<true /* is_range */, do_assignability_check>(self, 2502 resolved_method, 2503 *shadow_frame, 2504 method_handle, 2505 method_type, 2506 unused_args, 2507 first_callee_arg, 2508 result)) { 2509 DCHECK(self->IsExceptionPending()); 2510 } 2511 2512 // Pop transition record. 2513 self->PopManagedStackFragment(fragment); 2514 2515 return static_cast<uintptr_t>(shorty[0]); 2516} 2517 2518} // namespace art 2519