quick_trampoline_entrypoints.cc revision b95a5345ae4217b70ca36f0cced92f68dda7caf5
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Size of each spilled GPR.
#ifdef __LP64__
  static constexpr size_t kBytesPerGprSpillLocation = 8;
#else
  static constexpr size_t kBytesPerGprSpillLocation = 4;
#endif
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    callee saves
  // | R3         |    arg3
  // | R2         |    arg2
  // | R1         |    arg1
  // | R0         |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 4;  // FPR spill size is 4 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 48;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
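  // Worked example (illustrative): with the ARM constants above, sp+0 holds the callee-save
  // Method*, sp+4 the R0 padding slot, and the spilled argument registers R1-R3 live at
  // sp+8, sp+12 and sp+16. So, relative to gpr_args_ (sp + Gpr1Offset = sp+8),
  //   byte* r1_slot = gpr_args_ + GprIndexToGprOffset(0);  // == sp + 8
  // The return address sits at sp+44, and the caller's frame begins at sp+48.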
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X28        |
  // |  :         |
  // | X19        |
  // | X7         |
  // |  :         |
  // | X1         |
  // | D15        |
  // |  :         |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 8;  // FPR spill size is 8 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 304;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
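  // Worked example (illustrative): on AArch64 the 16 spilled FPRs D0-D15 occupy
  // sp+16..sp+143, so the second floating point argument (D1) is read from
  // fpr_args_ + 1 * kBytesPerFprSpillLocation == sp + 24. The first GPR argument (X1,
  // the receiver for instance methods) is at sp + 144.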
#elif defined(__mips__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 4;  // FPR spill size is 4 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 64;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | EAX/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 8;  // FPR spill size is 8 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 32;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 8;  // FPR spill size is 8 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 176;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * kBytesPerGprSpillLocation);
      case 1: return (1 * kBytesPerGprSpillLocation);
      case 2: return (0 * kBytesPerGprSpillLocation);
      case 3: return (5 * kBytesPerGprSpillLocation);
      case 4: return (6 * kBytesPerGprSpillLocation);
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif

 public:
  static mirror::ArtMethod* GetCallingMethod(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<mirror::ArtMethod**>(previous_sp);
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }
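  // Worked example (illustrative): on x86-64 the frame is 176 bytes, so the caller's
  // Method* is loaded from sp + 176 (the first slot of the caller's frame) and the return
  // address from sp + 168, e.g.:
  //   uintptr_t pc = QuickArgumentVisitor::GetCallingPc(sp);                 // *(sp + 168)
  //   mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);  // *(sp + 176)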
  QuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static,
                       const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
      gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
      is_split_long_or_double_(false) {
    DCHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
              Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  byte* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
          return fpr_args_ + (fpr_index_ * kBytesPerFprSpillLocation);
        }
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((kBytesPerGprSpillLocation == 4) || (kBytesPerFprSpillLocation == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    DCHECK(IsSplitLongOrDouble());
    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    return (low_half & 0xffffffffULL) | (high_half << 32);
  }
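  // Worked example (illustrative): for a static method with shorty "VIIJ" on ARM, the two
  // ints take R1 and R2 (gpr_index_ 0 and 1). The long is then visited with gpr_index_ == 2,
  // so gpr_index_ + 1 == kNumQuickGprArgs and it is "split": its low word is spilled in R3
  // (returned by GetParamAddress()) while its high word is the first out slot at stack_args_,
  // which is exactly what ReadSplitLongParam() stitches back together.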
  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    gpr_index_ = 0;
    fpr_index_ = 0;
    stack_index_ = 0;
    if (!is_static_) {  // Handle this.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      if (kNumQuickGprArgs > 0) {
        gpr_index_++;
      } else {
        stack_index_++;
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          if (gpr_index_ < kNumQuickGprArgs) {
            gpr_index_++;
          } else {
            stack_index_++;
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
            } else {
              stack_index_++;
            }
          } else {
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
            } else {
              stack_index_++;
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            is_split_long_or_double_ = (kBytesPerGprSpillLocation == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            Visit();
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
              if (kBytesPerGprSpillLocation == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  gpr_index_++;
                } else {
                  stack_index_++;
                }
              }
            } else {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
          } else {
            is_split_long_or_double_ = (kBytesPerFprSpillLocation == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs);
            Visit();
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
              if (kBytesPerFprSpillLocation == 4) {
                if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
                  fpr_index_++;
                } else {
                  stack_index_++;
                }
              }
            } else {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }
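  // Worked example (illustrative): visiting a virtual method with shorty "VJI" on ARM.
  // The receiver is visited first and takes gpr_index_ 0 (R1). The long is not split
  // (1 + 1 != kNumQuickGprArgs) and consumes gpr_index_ 1 and 2 (R2/R3). The trailing int
  // then finds gpr_index_ == kNumQuickGprArgs and is read from the first out slot via
  // stack_index_.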
 private:
  static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
                                             uint32_t shorty_len) {
    if (kQuickSoftFloatAbi) {
      CHECK_EQ(kNumQuickFprArgs, 0U);
      return (kNumQuickGprArgs * kBytesPerGprSpillLocation) + kBytesPerGprSpillLocation /* ArtMethod* */;
    } else {
      size_t offset = kBytesPerGprSpillLocation;  // Skip Method*.
      size_t gprs_seen = 0;
      size_t fprs_seen = 0;
      if (!is_static && (gprs_seen < kNumQuickGprArgs)) {
        gprs_seen++;
        offset += kBytesStackArgLocation;
      }
      for (uint32_t i = 1; i < shorty_len; ++i) {
        switch (shorty[i]) {
          case 'Z':
          case 'B':
          case 'C':
          case 'S':
          case 'I':
          case 'L':
            if (gprs_seen < kNumQuickGprArgs) {
              gprs_seen++;
              offset += kBytesStackArgLocation;
            }
            break;
          case 'J':
            if (gprs_seen < kNumQuickGprArgs) {
              gprs_seen++;
              offset += 2 * kBytesStackArgLocation;
              if (kBytesPerGprSpillLocation == 4) {
                if (gprs_seen < kNumQuickGprArgs) {
                  gprs_seen++;
                }
              }
            }
            break;
          case 'F':
            if ((kNumQuickFprArgs != 0) && (fprs_seen + 1 < kNumQuickFprArgs + 1)) {
              fprs_seen++;
              offset += kBytesStackArgLocation;
            }
            break;
          case 'D':
            if ((kNumQuickFprArgs != 0) && (fprs_seen + 1 < kNumQuickFprArgs + 1)) {
              fprs_seen++;
              offset += 2 * kBytesStackArgLocation;
              if (kBytesPerFprSpillLocation == 4) {
                if ((kNumQuickFprArgs != 0) && (fprs_seen + 1 < kNumQuickFprArgs + 1)) {
                  fprs_seen++;
                }
              }
            }
            break;
          default:
            LOG(FATAL) << "Unexpected shorty character: " << shorty[i] << " in " << shorty;
        }
      }
      return offset;
    }
  }

  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;
  byte* const gpr_args_;    // Address of GPR arguments in callee save frame.
  byte* const fpr_args_;    // Address of FPR arguments in callee save frame.
  byte* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;      // Index into spilled GPRs.
  uint32_t fpr_index_;      // Index into spilled FPRs.
  uint32_t stack_index_;    // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};
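// Worked example (illustrative): on a soft-float target such as ARM,
// StackArgumentStartFromShorty() always returns 3 * 4 + 4 == 16, i.e. stack arguments begin
// after the caller's Method* slot and the three out slots shadowing R1-R3. On hard-float
// targets the offset instead depends on how many arguments the shorty assigns to registers.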
// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
                                                mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
    MethodHelper mh(method);
    const DexFile::CodeItem* code_item = mh.GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
                                                  method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, mh.IsStatic(), mh.GetShorty(),
                                                      mh.GetShortyLength(),
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
      // Ensure static method's class is initialized.
      SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}
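// Worked example (illustrative): for a method whose code item has registers_size_ == 5 and
// ins_size_ == 3, the incoming arguments occupy the top of the frame, so first_arg_reg == 2
// and the visitor writes the receiver and arguments into vregs v2, v3 and v4.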
// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
// to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
                            std::vector<jvalue>* args) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      val.j = 0;
      break;
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
  }
}
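// Why FixupReferences() exists (illustrative): while the invocation handler runs, a moving GC
// may relocate the objects that were passed by reference. The local reference (pair.first) is
// updated by the GC, but the raw StackReference in the quick frame (pair.second) is not, so
// it is re-written from the decoded local reference before returning to managed code.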
// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                               mirror::Object* receiver,
                                               Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making stack crawlable.
  DCHECK_EQ(*sp, proxy_method) << PrettyMethod(proxy_method);
  self->SetTopOfStack(sp, 0);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  MethodHelper proxy_mh(proxy_method);
  DCHECK(!proxy_mh.IsStatic()) << PrettyMethod(proxy_method);
  std::vector<jvalue> args;
  BuildQuickArgumentVisitor local_ref_visitor(sp, proxy_mh.IsStatic(), proxy_mh.GetShorty(),
                                              proxy_mh.GetShortyLength(), &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  self->EndAssertNoThreadSuspension(old_cause);
  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
                                               rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
  }
}

716extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, 717 mirror::Object* receiver, 718 Thread* self, mirror::ArtMethod** sp) 719 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 720 FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); 721 // Start new JNI local reference state 722 JNIEnvExt* env = self->GetJniEnv(); 723 ScopedObjectAccessUnchecked soa(env); 724 ScopedJniEnvLocalRefState env_state(env); 725 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up"); 726 727 // Compute details about the called method (avoid GCs) 728 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 729 mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); 730 InvokeType invoke_type; 731 const DexFile* dex_file; 732 uint32_t dex_method_idx; 733 if (called->IsRuntimeMethod()) { 734 uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp)); 735 const DexFile::CodeItem* code; 736 { 737 MethodHelper mh(caller); 738 dex_file = &mh.GetDexFile(); 739 code = mh.GetCodeItem(); 740 } 741 CHECK_LT(dex_pc, code->insns_size_in_code_units_); 742 const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); 743 Instruction::Code instr_code = instr->Opcode(); 744 bool is_range; 745 switch (instr_code) { 746 case Instruction::INVOKE_DIRECT: 747 invoke_type = kDirect; 748 is_range = false; 749 break; 750 case Instruction::INVOKE_DIRECT_RANGE: 751 invoke_type = kDirect; 752 is_range = true; 753 break; 754 case Instruction::INVOKE_STATIC: 755 invoke_type = kStatic; 756 is_range = false; 757 break; 758 case Instruction::INVOKE_STATIC_RANGE: 759 invoke_type = kStatic; 760 is_range = true; 761 break; 762 case Instruction::INVOKE_SUPER: 763 invoke_type = kSuper; 764 is_range = false; 765 break; 766 case Instruction::INVOKE_SUPER_RANGE: 767 invoke_type = kSuper; 768 is_range = true; 769 break; 770 case Instruction::INVOKE_VIRTUAL: 771 invoke_type = kVirtual; 772 is_range = false; 773 break; 774 case Instruction::INVOKE_VIRTUAL_RANGE: 775 invoke_type = kVirtual; 776 is_range = true; 777 break; 778 case Instruction::INVOKE_INTERFACE: 779 invoke_type = kInterface; 780 is_range = false; 781 break; 782 case Instruction::INVOKE_INTERFACE_RANGE: 783 invoke_type = kInterface; 784 is_range = true; 785 break; 786 default: 787 LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); 788 // Avoid used uninitialized warnings. 789 invoke_type = kDirect; 790 is_range = false; 791 } 792 dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); 793 794 } else { 795 invoke_type = kStatic; 796 dex_file = &MethodHelper(called).GetDexFile(); 797 dex_method_idx = called->GetDexMethodIndex(); 798 } 799 uint32_t shorty_len; 800 const char* shorty = 801 dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len); 802 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); 803 visitor.VisitArguments(); 804 self->EndAssertNoThreadSuspension(old_cause); 805 bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface; 806 // Resolve method filling in dex cache. 807 if (called->IsRuntimeMethod()) { 808 SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? 
  const void* code = NULL;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
      }
      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method.
      if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
        caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
      } else {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(&MethodHelper(caller).GetDexFile() == dex_file);
        uint32_t method_index =
            MethodHelper(called).FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
        if (method_index != DexFile::kDexNoIndex) {
          caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
        }
      }
    }
    // Ensure that the called method's class is initialized.
    SirtRef<mirror::Class> called_class(soa.Self(), called->GetDeclaringClass());
    linker->EnsureInitialized(called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      code = called->GetEntryPointFromQuickCompiledCode();
    } else if (called_class->IsInitializing()) {
      if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == NULL, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;
  return code;
}

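// Worked example (illustrative): if the caller's instruction at dex_pc is
// `invoke-virtual {v1}, Ljava/lang/Object;->toString()Ljava/lang/String;`, the trampoline
// decodes invoke_type == kVirtual and dex_method_idx from VRegB_35c(), resolves toString()
// through the class linker, refines it against v1's dynamic type, stores the refined method
// into *sp, and returns its quick code entry point for the stub to tail-call.
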
/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float ABI, ARM, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    is necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on ARM only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t):   Add a value for the next GPR.
 *
 * void PushFpr4(float):      Add a value for the next FPR of size 32b. Is only called if we need
 *                            padding, that is, when the architecture is 32b and aligns 64b.
 *
 * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b, it's the callee's job to
 *                            split this if necessary. The current state will have aligned, if
 *                            necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. This _will_ be called
 *                                          with nullptr, as this might be important for null
 *                                          initialization. Must return the jobject, that is,
 *                                          the reference to the entry in the Sirt (nullptr if
 *                                          necessary).
 *
 */
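// A minimal sketch (illustrative, not part of the runtime) of a delegate satisfying this
// interface, which merely counts what would be pushed:
//
//   class CountingDelegate {
//    public:
//     void PushGpr(uintptr_t) { gprs_++; }
//     void PushFpr4(float) { fprs_++; }
//     void PushFpr8(uint64_t) { fprs_++; }
//     void PushStack(uintptr_t) { stack_words_++; }
//     uintptr_t PushSirt(mirror::Object*) { refs_++; return 0u; }  // No real Sirt entry here.
//    private:
//     size_t gprs_ = 0, fprs_ = 0, stack_words_ = 0, refs_ = 0;
//   };
//
// It would be driven as:
//   BuildGenericJniFrameStateMachine<CountingDelegate> sm(&delegate);
//   sm.AdvancePointer(env); sm.AdvanceInt(42); ...
// ComputeGenericJniFrameSize below is the real in-tree example of this pattern.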
template <class T> class BuildGenericJniFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways.
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildGenericJniFrameStateMachine(T* delegate) : gpr_index_(kNumNativeGprArgs),
                                                           fpr_index_(kNumNativeFprArgs),
                                                           stack_entries_(0),
                                                           delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    CHECK_EQ(kNumNativeFprArgs % 2, 0U);
  }

  virtual ~BuildGenericJniFrameStateMachine() {}

  bool HavePointerGpr() {
    return gpr_index_ > 0;
  }

  void AdvancePointer(void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveSirtGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t sirtRef = PushSirt(ptr);
    if (HaveSirtGpr()) {
      gpr_index_--;
      PushGpr(sirtRef);
    } else {
      stack_entries_++;
      PushStack(sirtRef);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      PushGpr(val);
    } else {
      stack_entries_++;
      PushStack(val);
      gpr_index_ = 0;
    }
  }

  bool HaveLongGpr() {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
           kAlignLongOnStack &&            // and when it needs alignment
           (gpr_index_ & 1) == 1;          // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
           kAlignLongOnStack &&            // and when it needs 8B alignment
           (stack_entries_ & 1) == 1;      // counter is odd
  }
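  // Worked example (illustrative): on ARM (kAlignLongOnStack, 2 registers per long), after
  // AdvancePointer(env) and AdvanceSirt(clazz) consume r0/r1, gpr_index_ is 2 (even) and a
  // long fits in r2/r3 with no padding. Had an extra int consumed r2 first, gpr_index_ would
  // be 1 (odd), HaveLongGpr() would fail, and the long would go to the stack, padded there if
  // stack_entries_ were odd.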
  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }

  bool HaveFloatFpr() {
    return fpr_index_ > 0;
  }

  template <typename U, typename V> V convert(U in) {
    CHECK_LE(sizeof(U), sizeof(V));
    union { U u; V v; } tmp;
    tmp.u = in;
    return tmp.v;
  }
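  // Example of convert<>() (illustrative): convert<float, uint32_t>(1.0f) yields 0x3F800000,
  // the raw IEEE-754 bit pattern, whereas a plain static_cast would yield the integer 1.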
  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(convert<float, uint32_t>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiRegistersWidened) {
            PushFpr8(convert<double, uint64_t>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(convert<float, uint64_t>(val));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          PushStack(convert<double, uintptr_t>(val));
        } else {
          PushStack(convert<float, uintptr_t>(val));
        }
        fpr_index_ = 0;
      }
    }
  }

  bool HaveDoubleFpr() {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
           kAlignDoubleOnStack &&            // and when it needs alignment
           (fpr_index_ & 1) == 1;            // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
           kAlignDoubleOnStack &&            // and when it needs 8B alignment
           (stack_entries_ & 1) == 1;        // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t getStackEntries() {
    return stack_entries_;
  }

  uint32_t getNumberOfUsedGprs() {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t getNumberOfUsedFprs() {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushSirt(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended.
  T* delegate_;             // What Push implementation gets called.
};
class ComputeGenericJniFrameSize FINAL {
 public:
  ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}

  uint32_t GetStackSize() {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
                     void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
                     uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
                     void** code_return, size_t* overall_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ComputeAll(is_static, shorty, shorty_len);

    mirror::ArtMethod* method = **m;

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the Sirt, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += kPointerSize;

    // Add the Sirt.
    *sirt_entries = num_sirt_references_;
    size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
    sp8 -= sirt_size;
    *table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
    (*table)->SetNumberOfReferences(num_sirt_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    sp8 -= kPointerSize;
    uint8_t* method_pointer = sp8;
    *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
    *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);

    // Reference cookie and padding.
    sp8 -= 8;
    // Store Sirt size.
    *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);

    // Next comes the native call stack.
    sp8 -= GetStackSize();
    // Now align the call stack below. This aligns by 16, as AArch64 seems to require.
    uintptr_t mask = ~0x0F;
    sp8 = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(sp8) & mask);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put FPRs and GPRs below.
    // Assumption is OK right now, as we have soft-float ARM.
    size_t fregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);

    // Reserve space for the code pointer.
    sp8 -= kPointerSize;
    *code_return = reinterpret_cast<void*>(sp8);

    *overall_size = reinterpret_cast<uint8_t*>(sp) - sp8;

    // The new SP is stored at the end of the alloca, so it can be immediately popped.
    sp8 = reinterpret_cast<uint8_t*>(sp) - 5 * KB;
    *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
  }
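  // Resulting layout (illustrative), growing downwards from the original sp:
  //   [Sirt]                      <- squeezed into the callee-save frame
  //   [Method*]                   <- new *m, the new managed sp
  //   [cookie + padding]          <- 8 bytes; low word holds the Sirt size
  //   [native call stack]         <- GetStackSize() bytes, 16-byte aligned below
  //   [FPR scratch][GPR scratch]  <- values to be loaded into registers
  //   [code pointer]              <- filled in later by the trampoline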
  void ComputeSirtOffset() { }  // Nothing to do, static right now.

  void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize> sm(this);

    // JNIEnv.
    sm.AdvancePointer(nullptr);

    // Class object or this as first argument.
    sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
      }
    }

    num_stack_entries_ = sm.getStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr4(float /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr8(uint64_t /* val */) {
    // not optimizing registers, yet
  }

  void PushStack(uintptr_t /* val */) {
    // counting is already done in the superclass
  }

  uintptr_t PushSirt(mirror::Object* /* ptr */) {
    num_sirt_references_++;
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 private:
  uint32_t num_sirt_references_;
  uint32_t num_stack_entries_;
};
// Visits arguments on the stack placing them into a region lower down the stack for the benefit
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
                              uint32_t shorty_len, Thread* self) :
    QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
    ComputeGenericJniFrameSize fsc;
    fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
                      &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
                      &alloca_used_size_);
    sirt_number_of_references_ = 0;
    cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());

    // JNI environment is always the first argument.
    sm_.AdvancePointer(self->GetJniEnv());

    if (is_static) {
      sm_.AdvanceSirt((**sp)->GetDeclaringClass());
    }
  }

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
  }

  void PushGpr(uintptr_t val) {
    *cur_gpr_reg_ = val;
    cur_gpr_reg_++;
  }

  void PushFpr4(float val) {
    *cur_fpr_reg_ = val;
    cur_fpr_reg_++;
  }

  void PushFpr8(uint64_t val) {
    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
    *tmp = val;
    cur_fpr_reg_ += 2;
  }

  void PushStack(uintptr_t val) {
    *cur_stack_arg_ = val;
    cur_stack_arg_++;
  }

  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t tmp;
    if (ref == nullptr) {
      *cur_sirt_entry_ = StackReference<mirror::Object>();
      tmp = reinterpret_cast<uintptr_t>(nullptr);
    } else {
      *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
      tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
    }
    cur_sirt_entry_++;
    sirt_number_of_references_++;
    return tmp;
  }

  // Size of the part of the alloca that we actually need.
  size_t GetAllocaUsedSize() {
    return alloca_used_size_;
  }

  void* GetCodeReturn() {
    return code_return_;
  }

 private:
  uint32_t sirt_number_of_references_;
  StackReference<mirror::Object>* cur_sirt_entry_;
  StackIndirectReferenceTable* sirt_;
  uint32_t sirt_expected_refs_;
  uintptr_t* cur_gpr_reg_;
  uint32_t* cur_fpr_reg_;
  uintptr_t* cur_stack_arg_;
  // StackReference<mirror::Object>* top_of_sirt_;
  void* code_return_;
  size_t alloca_used_size_;

  BuildGenericJniFrameStateMachine<BuildGenericJniFrameVisitor> sm_;

  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};
void BuildGenericJniFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong: {
      jlong long_arg;
      if (IsSplitLongOrDouble()) {
        long_arg = ReadSplitLongParam();
      } else {
        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      sm_.AdvanceLong(long_arg);
      break;
    }
    case Primitive::kPrimDouble: {
      uint64_t double_arg;
      if (IsSplitLongOrDouble()) {
        // Read into union so that we don't cast to a double.
        double_arg = ReadSplitLongParam();
      } else {
        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
      }
      sm_.AdvanceDouble(double_arg);
      break;
    }
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimFloat:
      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
}

void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
  // Initialize padding entries.
  while (sirt_number_of_references_ < sirt_expected_refs_) {
    *cur_sirt_entry_ = StackReference<mirror::Object>();
    cur_sirt_entry_++;
    sirt_number_of_references_++;
  }
  sirt_->SetNumberOfReferences(sirt_expected_refs_);
  DCHECK_NE(sirt_expected_refs_, 0U);
  // Install Sirt.
  self->PushSirt(sirt_);
}

extern "C" void* artFindNativeMethod();

/*
 * Initializes an alloca region assumed to be directly below sp for a native call:
 * Create a Sirt and call stack and fill a mini stack with values to be pushed to registers.
 * The final element on the stack is a pointer to the native code.
 *
 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
 * We need to fix this, as the Sirt needs to go into the callee-save frame.
 *
 * The return of this function denotes:
 * 1) How many bytes of the alloca can be released, if the value is non-negative.
 * 2) An error, if the value is negative.
 */
extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* called = *sp;
  DCHECK(called->IsNative()) << PrettyMethod(called, true);

  // Run the visitor.
  MethodHelper mh(called);

  BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
                                      self);
  visitor.VisitArguments();
  visitor.FinalizeSirt(self);

  // Fix up managed-stack things in Thread.
  self->SetTopOfStack(sp, 0);

  self->VerifyStack();

  // Start JNI, save the cookie.
  uint32_t cookie;
  if (called->IsSynchronized()) {
    cookie = JniMethodStartSynchronized(visitor.GetFirstSirtEntry(), self);
    if (self->IsExceptionPending()) {
      self->PopSirt();
      // A negative value denotes an error.
      return -1;
    }
  } else {
    cookie = JniMethodStart(self);
  }
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  *(sp32 - 1) = cookie;

  // Retrieve the stored native code.
  const void* nativeCode = called->GetNativeMethod();

  // There are two cases for the content of nativeCode:
  // 1) Pointer to the native function.
  // 2) Pointer to the trampoline for native code binding.
  // In the second case, we need to execute the binding and continue with the actual native function
  // pointer.
  DCHECK(nativeCode != nullptr);
  if (nativeCode == GetJniDlsymLookupStub()) {
    nativeCode = artFindNativeMethod();

    if (nativeCode == nullptr) {
      DCHECK(self->IsExceptionPending());  // There should be an exception pending now.
      return -1;
    }
    // Note that the native code pointer will be automatically set by artFindNativeMethod().
  }

  // Store the native code pointer in the stack at the right location.
  uintptr_t* code_pointer = reinterpret_cast<uintptr_t*>(visitor.GetCodeReturn());
  *code_pointer = reinterpret_cast<uintptr_t>(nativeCode);

  // 5K reserved, window_size + frame pointer used.
  size_t window_size = visitor.GetAllocaUsedSize();
  return (5 * KB) - window_size - kPointerSize;
}
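// Worked example (illustrative): with the 5KB alloca and GetAllocaUsedSize() == 96 on a
// 64-bit target, the return value is 5 * 1024 - 96 - 8 == 5016, i.e. the stub may release
// 5016 bytes and keeps only the used window (native stack args, register scratch area and
// the code pointer) plus the one pointer slot holding the stored SP.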
/*
 * Called after the native JNI code. Responsible for cleanup (SIRT, saved state) and
 * unlocking.
 */
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
                                                    jvalue result, uint64_t result_f)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  mirror::ArtMethod* called = *sp;
  uint32_t cookie = *(sp32 - 1);

  MethodHelper mh(called);
  char return_shorty_char = mh.GetShorty()[0];

  if (return_shorty_char == 'L') {
    // The only special ending call.
    if (called->IsSynchronized()) {
      StackIndirectReferenceTable* table =
          reinterpret_cast<StackIndirectReferenceTable*>(
              reinterpret_cast<uint8_t*>(sp) + kPointerSize);
      jobject tmp = reinterpret_cast<jobject>(table->GetStackReference(0));

      return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(result.l, cookie, tmp,
                                                                              self));
    } else {
      return reinterpret_cast<uint64_t>(JniMethodEndWithReference(result.l, cookie, self));
    }
  } else {
    if (called->IsSynchronized()) {
      StackIndirectReferenceTable* table =
          reinterpret_cast<StackIndirectReferenceTable*>(
              reinterpret_cast<uint8_t*>(sp) + kPointerSize);
      jobject tmp = reinterpret_cast<jobject>(table->GetStackReference(0));

      JniMethodEndSynchronized(cookie, tmp, self);
    } else {
      JniMethodEnd(cookie, self);
    }

    switch (return_shorty_char) {
      case 'F':  // Fall-through.
      case 'D':
        return result_f;
      case 'Z':
        return result.z;
      case 'B':
        return result.b;
      case 'C':
        return result.c;
      case 'S':
        return result.s;
      case 'I':
        return result.i;
      case 'J':
        return result.j;
      case 'V':
        return 0;
      default:
        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
        return 0;
    }
  }
}

}  // namespace art