runtime.h revision 94f7b49578b6aaa80de8ffed230648d601393905
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "compiler_callbacks.h"
#include "gc_root.h"
#include "instruction_set.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "profiler_options.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"
#include "safe_map.h"

namespace art {

namespace gc {
  class Heap;
}  // namespace gc
namespace mirror {
  class ArtMethod;
  class ClassLoader;
  class Array;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int8_t> ByteArray;
  class String;
  class Throwable;
}  // namespace mirror
namespace verifier {
class MethodVerifier;
}
class ClassLinker;
class DexFile;
class InternTable;
class JavaVMExt;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class Trace;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

// Not all combinations of flags are valid.
You may not visit all roots as well as the new roots 76// (no logical reason to do this). You also may not start logging new roots and stop logging new 77// roots (also no logical reason to do this). 78enum VisitRootFlags : uint8_t { 79 kVisitRootFlagAllRoots = 0x1, 80 kVisitRootFlagNewRoots = 0x2, 81 kVisitRootFlagStartLoggingNewRoots = 0x4, 82 kVisitRootFlagStopLoggingNewRoots = 0x8, 83 kVisitRootFlagClearRootLog = 0x10, 84}; 85 86class Runtime { 87 public: 88 // Creates and initializes a new runtime. 89 static bool Create(const RuntimeOptions& options, bool ignore_unrecognized) 90 SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_); 91 92 bool IsCompiler() const { 93 return compiler_callbacks_ != nullptr; 94 } 95 96 bool CanRelocate() const { 97 return !IsCompiler() || compiler_callbacks_->IsRelocationPossible(); 98 } 99 100 bool ShouldRelocate() const { 101 return must_relocate_ && CanRelocate(); 102 } 103 104 bool MustRelocateIfPossible() const { 105 return must_relocate_; 106 } 107 108 CompilerCallbacks* GetCompilerCallbacks() { 109 return compiler_callbacks_; 110 } 111 112 bool IsZygote() const { 113 return is_zygote_; 114 } 115 116 bool IsExplicitGcDisabled() const { 117 return is_explicit_gc_disabled_; 118 } 119 120 std::string GetCompilerExecutable() const; 121 std::string GetPatchoatExecutable() const; 122 123 const std::vector<std::string>& GetCompilerOptions() const { 124 return compiler_options_; 125 } 126 127 const std::vector<std::string>& GetImageCompilerOptions() const { 128 return image_compiler_options_; 129 } 130 131 const ProfilerOptions& GetProfilerOptions() const { 132 return profiler_options_; 133 } 134 135 // Starts a runtime, which may cause threads to be started and code to run. 
136 bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_); 137 138 bool IsShuttingDown(Thread* self); 139 bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { 140 return shutting_down_; 141 } 142 143 size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { 144 return threads_being_born_; 145 } 146 147 void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { 148 threads_being_born_++; 149 } 150 151 void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); 152 153 bool IsStarted() const { 154 return started_; 155 } 156 157 bool IsFinishedStarting() const { 158 return finished_starting_; 159 } 160 161 static Runtime* Current() { 162 return instance_; 163 } 164 165 // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most 166 // callers should prefer. 167 // This isn't marked ((noreturn)) because then gcc will merge multiple calls 168 // in a single function together. This reduces code size slightly, but means 169 // that the native stack trace we get may point at the wrong call site. 170 static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_); 171 172 // Returns the "main" ThreadGroup, used when attaching user threads. 173 jobject GetMainThreadGroup() const; 174 175 // Returns the "system" ThreadGroup, used when attaching our internal threads. 176 jobject GetSystemThreadGroup() const; 177 178 // Returns the system ClassLoader which represents the CLASSPATH. 179 jobject GetSystemClassLoader() const; 180 181 // Attaches the calling native thread to the runtime. 182 bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group, 183 bool create_peer); 184 185 void CallExitHook(jint status); 186 187 // Detaches the current native thread from the runtime. 
188 void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_); 189 190 void DumpForSigQuit(std::ostream& os) 191 EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); 192 void DumpLockHolders(std::ostream& os); 193 194 ~Runtime(); 195 196 const std::string& GetBootClassPathString() const { 197 return boot_class_path_string_; 198 } 199 200 const std::string& GetClassPathString() const { 201 return class_path_string_; 202 } 203 204 ClassLinker* GetClassLinker() const { 205 return class_linker_; 206 } 207 208 size_t GetDefaultStackSize() const { 209 return default_stack_size_; 210 } 211 212 gc::Heap* GetHeap() const { 213 return heap_; 214 } 215 216 InternTable* GetInternTable() const { 217 DCHECK(intern_table_ != NULL); 218 return intern_table_; 219 } 220 221 JavaVMExt* GetJavaVM() const { 222 return java_vm_; 223 } 224 225 size_t GetMaxSpinsBeforeThinkLockInflation() const { 226 return max_spins_before_thin_lock_inflation_; 227 } 228 229 MonitorList* GetMonitorList() const { 230 return monitor_list_; 231 } 232 233 MonitorPool* GetMonitorPool() const { 234 return monitor_pool_; 235 } 236 237 mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 238 239 const std::vector<std::string>& GetProperties() const { 240 return properties_; 241 } 242 243 ThreadList* GetThreadList() const { 244 return thread_list_; 245 } 246 247 static const char* GetVersion() { 248 return "2.1.0"; 249 } 250 251 void DisallowNewSystemWeaks() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); 252 void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 253 254 // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If 255 // clean_dirty is true then dirty roots will be marked as non-dirty after visiting. 
256 void VisitRoots(RootCallback* visitor, void* arg, VisitRootFlags flags = kVisitRootFlagAllRoots) 257 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 258 259 // Visit all of the roots we can do safely do concurrently. 260 void VisitConcurrentRoots(RootCallback* visitor, void* arg, 261 VisitRootFlags flags = kVisitRootFlagAllRoots) 262 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 263 264 // Visit all of the non thread roots, we can do this with mutators unpaused. 265 void VisitNonThreadRoots(RootCallback* visitor, void* arg) 266 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 267 268 // Visit all other roots which must be done with mutators suspended. 269 void VisitNonConcurrentRoots(RootCallback* visitor, void* arg) 270 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 271 272 // Sweep system weaks, the system weak is deleted if the visitor return nullptr. Otherwise, the 273 // system weak is updated to be the visitor's returned value. 274 void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) 275 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 276 277 // Constant roots are the roots which never change after the runtime is initialized, they only 278 // need to be visited once per GC cycle. 279 void VisitConstantRoots(RootCallback* callback, void* arg) 280 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 281 282 // Returns a special method that calls into a trampoline for runtime method resolution 283 mirror::ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 284 285 bool HasResolutionMethod() const { 286 return !resolution_method_.IsNull(); 287 } 288 289 void SetResolutionMethod(mirror::ArtMethod* method) { 290 resolution_method_ = GcRoot<mirror::ArtMethod>(method); 291 } 292 293 mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 294 295 // Returns a special method that calls into a trampoline for runtime imt conflicts. 
296 mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 297 298 bool HasImtConflictMethod() const { 299 return !imt_conflict_method_.IsNull(); 300 } 301 302 void SetImtConflictMethod(mirror::ArtMethod* method) { 303 imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method); 304 } 305 306 mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 307 308 // Returns an imt with every entry set to conflict, used as default imt for all classes. 309 mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt() 310 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 311 312 bool HasDefaultImt() const { 313 return !default_imt_.IsNull(); 314 } 315 316 void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) { 317 default_imt_ = GcRoot<mirror::ObjectArray<mirror::ArtMethod>>(imt); 318 } 319 320 mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl) 321 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 322 323 // Returns a special method that describes all callee saves being spilled to the stack. 
324 enum CalleeSaveType { 325 kSaveAll, 326 kRefsOnly, 327 kRefsAndArgs, 328 kLastCalleeSaveType // Value used for iteration 329 }; 330 331 bool HasCalleeSaveMethod(CalleeSaveType type) const { 332 return !callee_save_methods_[type].IsNull(); 333 } 334 335 mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) 336 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 337 338 mirror::ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type) 339 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 340 341 QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const { 342 return callee_save_method_frame_infos_[type]; 343 } 344 345 QuickMethodFrameInfo GetRuntimeMethodFrameInfo(mirror::ArtMethod* method) 346 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 347 348 static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) { 349 return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]); 350 } 351 352 InstructionSet GetInstructionSet() const { 353 return instruction_set_; 354 } 355 356 void SetInstructionSet(InstructionSet instruction_set); 357 358 void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type); 359 360 mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type) 361 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 362 363 int32_t GetStat(int kind); 364 365 RuntimeStats* GetStats() { 366 return &stats_; 367 } 368 369 bool HasStatsEnabled() const { 370 return stats_enabled_; 371 } 372 373 void ResetStats(int kinds); 374 375 void SetStatsEnabled(bool new_state); 376 377 void PreZygoteFork(); 378 bool InitZygote(); 379 void DidForkFromZygote(); 380 381 const instrumentation::Instrumentation* GetInstrumentation() const { 382 return &instrumentation_; 383 } 384 385 instrumentation::Instrumentation* GetInstrumentation() { 386 return &instrumentation_; 387 } 388 389 bool UseCompileTimeClassPath() const { 390 return use_compile_time_class_path_; 391 } 392 393 void AddMethodVerifier(verifier::MethodVerifier* verifier) 
LOCKS_EXCLUDED(method_verifier_lock_); 394 void RemoveMethodVerifier(verifier::MethodVerifier* verifier) 395 LOCKS_EXCLUDED(method_verifier_lock_); 396 397 const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader); 398 void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path); 399 400 void StartProfiler(const char* profile_output_filename); 401 void UpdateProfilerState(int state); 402 403 // Transaction support. 404 bool IsActiveTransaction() const { 405 return preinitialization_transaction_ != nullptr; 406 } 407 void EnterTransactionMode(Transaction* transaction); 408 void ExitTransactionMode(); 409 void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value, 410 bool is_volatile) const; 411 void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value, 412 bool is_volatile) const; 413 void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset, 414 mirror::Object* value, bool is_volatile) const; 415 void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const 416 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 417 void RecordStrongStringInsertion(mirror::String* s, uint32_t hash_code) const 418 EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); 419 void RecordWeakStringInsertion(mirror::String* s, uint32_t hash_code) const 420 EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); 421 void RecordStrongStringRemoval(mirror::String* s, uint32_t hash_code) const 422 EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); 423 void RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code) const 424 EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); 425 426 void SetFaultMessage(const std::string& message); 427 // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations 428 // with the unexpected_signal_lock_. 
429 const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS { 430 return fault_message_; 431 } 432 433 void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const; 434 435 bool ExplicitNullChecks() const { 436 return null_pointer_handler_ == nullptr; 437 } 438 439 bool ExplicitSuspendChecks() const { 440 return suspend_handler_ == nullptr; 441 } 442 443 bool ExplicitStackOverflowChecks() const { 444 return stack_overflow_handler_ == nullptr; 445 } 446 447 bool IsVerificationEnabled() const { 448 return verify_; 449 } 450 451 bool RunningOnValgrind() const { 452 return running_on_valgrind_; 453 } 454 455 void SetTargetSdkVersion(int32_t version) { 456 target_sdk_version_ = version; 457 } 458 459 int32_t GetTargetSdkVersion() const { 460 return target_sdk_version_; 461 } 462 463 static const char* GetDefaultInstructionSetFeatures() { 464 return kDefaultInstructionSetFeatures; 465 } 466 467 private: 468 static void InitPlatformSignalHandlers(); 469 470 Runtime(); 471 472 void BlockSignals(); 473 474 bool Init(const RuntimeOptions& options, bool ignore_unrecognized) 475 SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_); 476 void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_); 477 void InitThreadGroups(Thread* self); 478 void RegisterRuntimeNativeMethods(JNIEnv* env); 479 480 void StartDaemonThreads(); 481 void StartSignalCatcher(); 482 483 // A pointer to the active runtime or NULL. 484 static Runtime* instance_; 485 486 static const char* kDefaultInstructionSetFeatures; 487 488 // NOTE: these must match the gc::ProcessState values as they come directly from the framework. 
489 static constexpr int kProfileForground = 0; 490 static constexpr int kProfileBackgrouud = 1; 491 492 GcRoot<mirror::ArtMethod> callee_save_methods_[kLastCalleeSaveType]; 493 GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_; 494 GcRoot<mirror::ArtMethod> resolution_method_; 495 GcRoot<mirror::ArtMethod> imt_conflict_method_; 496 GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_; 497 498 InstructionSet instruction_set_; 499 QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType]; 500 501 CompilerCallbacks* compiler_callbacks_; 502 bool is_zygote_; 503 bool must_relocate_; 504 bool is_concurrent_gc_enabled_; 505 bool is_explicit_gc_disabled_; 506 507 std::string compiler_executable_; 508 std::string patchoat_executable_; 509 std::vector<std::string> compiler_options_; 510 std::vector<std::string> image_compiler_options_; 511 512 std::string boot_class_path_string_; 513 std::string class_path_string_; 514 std::vector<std::string> properties_; 515 516 // The default stack size for managed threads created by the runtime. 517 size_t default_stack_size_; 518 519 gc::Heap* heap_; 520 521 // The number of spins that are done before thread suspension is used to forcibly inflate. 522 size_t max_spins_before_thin_lock_inflation_; 523 MonitorList* monitor_list_; 524 MonitorPool* monitor_pool_; 525 526 ThreadList* thread_list_; 527 528 InternTable* intern_table_; 529 530 ClassLinker* class_linker_; 531 532 SignalCatcher* signal_catcher_; 533 std::string stack_trace_file_; 534 535 JavaVMExt* java_vm_; 536 537 // Fault message, printed when we get a SIGSEGV. 538 Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; 539 std::string fault_message_ GUARDED_BY(fault_message_lock_); 540 541 // Method verifier set, used so that we can update their GC roots. 
542 Mutex method_verifier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; 543 std::set<verifier::MethodVerifier*> method_verifiers_; 544 545 // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by 546 // the shutdown lock so that threads aren't born while we're shutting down. 547 size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_); 548 549 // Waited upon until no threads are being born. 550 std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_); 551 552 // Set when runtime shutdown is past the point that new threads may attach. 553 bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_); 554 555 // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_. 556 bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_); 557 558 bool started_; 559 560 // New flag added which tells us if the runtime has finished starting. If 561 // this flag is set then the Daemon threads are created and the class loader 562 // is created. This flag is needed for knowing if its safe to request CMS. 
563 bool finished_starting_; 564 565 // Hooks supported by JNI_CreateJavaVM 566 jint (*vfprintf_)(FILE* stream, const char* format, va_list ap); 567 void (*exit_)(jint status); 568 void (*abort_)(); 569 570 bool stats_enabled_; 571 RuntimeStats stats_; 572 573 const bool running_on_valgrind_; 574 575 std::string profile_output_filename_; 576 ProfilerOptions profiler_options_; 577 bool profiler_started_; 578 579 bool method_trace_; 580 std::string method_trace_file_; 581 size_t method_trace_file_size_; 582 instrumentation::Instrumentation instrumentation_; 583 584 typedef SafeMap<jobject, std::vector<const DexFile*>, JobjectComparator> CompileTimeClassPaths; 585 CompileTimeClassPaths compile_time_class_paths_; 586 bool use_compile_time_class_path_; 587 588 jobject main_thread_group_; 589 jobject system_thread_group_; 590 591 // As returned by ClassLoader.getSystemClassLoader(). 592 jobject system_class_loader_; 593 594 // If true, then we dump the GC cumulative timings on shutdown. 595 bool dump_gc_performance_on_shutdown_; 596 597 // Transaction used for pre-initializing classes at compilation time. 598 Transaction* preinitialization_transaction_; 599 NullPointerHandler* null_pointer_handler_; 600 SuspensionHandler* suspend_handler_; 601 StackOverflowHandler* stack_overflow_handler_; 602 603 // If false, verification is disabled. True by default. 604 bool verify_; 605 606 // Specifies target SDK version to allow workarounds for certain API levels. 607 int32_t target_sdk_version_; 608 609 // Implicit checks flags. 610 bool implicit_null_checks_; // NullPointer checks are implicit. 611 bool implicit_so_checks_; // StackOverflow checks are implicit. 612 bool implicit_suspend_checks_; // Thread suspension checks are implicit. 613 614 DISALLOW_COPY_AND_ASSIGN(Runtime); 615}; 616 617} // namespace art 618 619#endif // ART_RUNTIME_RUNTIME_H_ 620