// runtime.h revision 88474b416eb257078e590bf9bc7957cee604a186
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <string>
#include <utility>
#include <vector>

#include "base/macros.h"
#include "base/stringpiece.h"
#include "gc/heap.h"
#include "globals.h"
#include "instruction_set.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
#include "locks.h"
#include "root_visitor.h"
#include "runtime_stats.h"
#include "safe_map.h"

namespace art {

namespace gc {
  class Heap;
}  // namespace gc
namespace mirror {
  class ArtMethod;
  class ClassLoader;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int8_t> ByteArray;
  class String;
  class Throwable;
}  // namespace mirror
class ClassLinker;
class DexFile;
class InternTable;
struct JavaVMExt;
class MonitorList;
class SignalCatcher;
class ThreadList;
class Trace;

// Process-wide runtime state. At most one Runtime is active per process at a
// time; the active instance (or NULL) is reachable via Current(), backed by
// the static instance_ member below.
class Runtime {
 public:
  // Name/value option pairs, as passed to Create() (and JNI_CreateJavaVM).
  typedef std::vector<std::pair<std::string, const void*> > Options;

  // How aggressively the compiler driver compiles methods.
  enum CompilerFilter {
    kInterpretOnly,  // Compile nothing.
    kSpace,          // Maximize space savings.
    kBalanced,       // Try to get the best performance return on compilation investment.
    kSpeed,          // Maximize runtime performance.
    kEverything      // Force compilation (Note: excludes compilation of class initializers).
  };

  // Guide heuristics to determine whether to compile method if profile data not available.
#if ART_SMALL_MODE
  static const CompilerFilter kDefaultCompilerFilter = kInterpretOnly;
#else
  static const CompilerFilter kDefaultCompilerFilter = kSpeed;
#endif
  // Default method-size thresholds (in dex code units / method counts) used by
  // the compilation heuristics above.
  static const size_t kDefaultHugeMethodThreshold = 10000;
  static const size_t kDefaultLargeMethodThreshold = 600;
  static const size_t kDefaultSmallMethodThreshold = 60;
  static const size_t kDefaultTinyMethodThreshold = 20;
  static const size_t kDefaultNumDexMethodsThreshold = 900;

  // Option values decoded from the raw name/value Options list handed to
  // Runtime::Create(). Plain data holder; fields are populated by Create().
  class ParsedOptions {
   public:
    // returns null if problem parsing and ignore_unrecognized is false
    static ParsedOptions* Create(const Options& options, bool ignore_unrecognized);

    const std::vector<const DexFile*>* boot_class_path_;
    std::string boot_class_path_string_;
    std::string class_path_string_;
    std::string host_prefix_;
    std::string image_;
    bool check_jni_;
    std::string jni_trace_;
    bool is_compiler_;
    bool is_zygote_;
    bool interpreter_only_;
    bool is_concurrent_gc_enabled_;
    bool is_explicit_gc_disabled_;
    size_t long_pause_log_threshold_;
    size_t long_gc_log_threshold_;
    bool ignore_max_footprint_;
    size_t heap_initial_size_;
    size_t heap_maximum_size_;
    size_t heap_growth_limit_;
    size_t heap_min_free_;
    size_t heap_max_free_;
    double heap_target_utilization_;
    size_t parallel_gc_threads_;
    size_t conc_gc_threads_;
    size_t stack_size_;
    size_t max_spins_before_thin_lock_inflation_;
    bool low_memory_mode_;
    size_t lock_profiling_threshold_;
    std::string stack_trace_file_;
    bool method_trace_;
    std::string method_trace_file_;
    size_t method_trace_file_size_;
    // Hooks supplied through the JNI invocation interface (may be NULL).
    bool (*hook_is_sensitive_thread_)();
    jint (*hook_vfprintf_)(FILE* stream, const char* format, va_list ap);
    void (*hook_exit_)(jint status);
    void (*hook_abort_)();
    std::vector<std::string> properties_;
    CompilerFilter compiler_filter_;
    size_t huge_method_threshold_;
    size_t large_method_threshold_;
    size_t small_method_threshold_;
    size_t tiny_method_threshold_;
    size_t num_dex_methods_threshold_;
    bool sea_ir_mode_;

   private:
    ParsedOptions() {}
  };

  // Creates and initializes a new runtime.
  static bool Create(const Options& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  bool IsCompiler() const {
    return is_compiler_;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsConcurrentGcEnabled() const {
    return is_concurrent_gc_enabled_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

#ifdef ART_SEA_IR_MODE
  bool IsSeaIRMode() const {
    return sea_ir_mode_;
  }
#endif

  void SetSeaIRMode(bool sea_ir_mode) {
    sea_ir_mode_ = sea_ir_mode;
  }

  CompilerFilter GetCompilerFilter() const {
    return compiler_filter_;
  }

  void SetCompilerFilter(CompilerFilter compiler_filter) {
    compiler_filter_ = compiler_filter;
  }

  size_t GetHugeMethodThreshold() const {
    return huge_method_threshold_;
  }

  size_t GetLargeMethodThreshold() const {
    return large_method_threshold_;
  }

  size_t GetSmallMethodThreshold() const {
    return small_method_threshold_;
  }

  size_t GetTinyMethodThreshold() const {
    return tiny_method_threshold_;
  }

  size_t GetNumDexMethodsThreshold() const {
    return num_dex_methods_threshold_;
  }

  // Only meaningful before the runtime has started (DCHECKed below); see the
  // host_prefix_ field comment for what the prefix is used for.
  const std::string& GetHostPrefix() const {
    DCHECK(!IsStarted());
    return host_prefix_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  // Counterpart to StartThreadBirth; out-of-line (defined in runtime.cc).
  void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  // Returns the active runtime, or NULL if none has been created yet.
  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  // This isn't marked ((noreturn)) because then gcc will merge multiple calls
  // in a single function together. This reduces code size slightly, but means
  // that the native stack trace we get may point at the wrong call site.
  static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  // Invokes the exit_ hook (if any) with the given status.
  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  // The default stack size for managed threads created by the runtime.
  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != NULL);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_;
  }

  // NOTE(review): "Think" here looks like a typo for "Thin" (thin-lock
  // inflation, cf. max_spins_before_thin_lock_inflation_); the name is kept
  // because renaming would break existing callers.
  size_t GetMaxSpinsBeforeThinkLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  // Returns the OutOfMemoryError instance pre-allocated at startup so it can
  // be thrown even when the heap is exhausted.
  mirror::Throwable* GetPreAllocatedOutOfMemoryError() const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  const char* GetVersion() const {
    return "2.0.0";
  }

  void DisallowNewSystemWeaks() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
  void VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the roots that we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty);

  // Visit all of the non thread roots, we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor, void* arg);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweep system weaks, the system weak is deleted if the visitor returns nullptr. Otherwise, the
  // system weak is updated to be the visitor's returned value.
  void SweepSystemWeaks(RootVisitor* visitor, void* arg);

  // Returns a special method that calls into a trampoline for runtime method resolution
  mirror::ArtMethod* GetResolutionMethod() const {
    CHECK(HasResolutionMethod());
    return resolution_method_;
  }

  bool HasResolutionMethod() const {
    return resolution_method_ != NULL;
  }

  void SetResolutionMethod(mirror::ArtMethod* method) {
    resolution_method_ = method;
  }

  mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts
  mirror::ArtMethod* GetImtConflictMethod() const {
    CHECK(HasImtConflictMethod());
    return imt_conflict_method_;
  }

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != NULL;
  }

  void SetImtConflictMethod(mirror::ArtMethod* method) {
    imt_conflict_method_ = method;
  }

  mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns an imt with every entry set to conflict, used as default imt for all classes.
  mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt() const {
    CHECK(HasDefaultImt());
    return default_imt_;
  }

  bool HasDefaultImt() const {
    return default_imt_ != NULL;
  }

  void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
    default_imt_ = imt;
  }

  mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that describes all callee saves being spilled to the stack.
  enum CalleeSaveType {
    kSaveAll,
    kRefsOnly,
    kRefsAndArgs,
    kLastCalleeSaveType  // Value used for iteration
  };

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[type] != NULL;
  }

  mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) const {
    DCHECK(HasCalleeSaveMethod(type));
    return callee_save_methods_[type];
  }

  void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);

  mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet instruction_set,
                                            CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Runtime statistics; "kind"/"kinds" select which stat(s) — see RuntimeStats
  // in runtime_stats.h for the available counters.
  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state);

  // Zygote lifecycle hooks: called around fork() in the zygote process.
  bool PreZygoteFork();
  bool InitZygote();
  void DidForkFromZygote();

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  // Compile-time class path support (used when running as a compiler);
  // see compile_time_class_paths_ below.
  bool UseCompileTimeClassPath() const {
    return use_compile_time_class_path_;
  }

  const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
  void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);

  void InstrumentQuickAllocEntryPoints();
  void UninstrumentQuickAllocEntryPoints();

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(const Options& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void InitThreadGroups(Thread* self);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  // A pointer to the active runtime or NULL.
  static Runtime* instance_;

  bool is_compiler_;
  bool is_zygote_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;

  CompilerFilter compiler_filter_;
  size_t huge_method_threshold_;
  size_t large_method_threshold_;
  size_t small_method_threshold_;
  size_t tiny_method_threshold_;
  size_t num_dex_methods_threshold_;

  bool sea_ir_mode_;

  // The host prefix is used during cross compilation. It is removed
  // from the start of host paths such as:
  //     $ANDROID_PRODUCT_OUT/system/framework/boot.oat
  // to produce target paths such as
  //     /system/framework/boot.oat
  // Similarly it is prepended to target paths to arrive back at a
  // host path. In both cases this is necessary because image and oat
  // files embedded expect paths of dependent files (an image points
  // to an oat file and an oat file to one or more dex files). These
  // files contain the expected target path.
  std::string host_prefix_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;
  std::string stack_trace_file_;

  JavaVMExt* java_vm_;

  // Pre-allocated so it can be thrown even when allocation fails; see
  // GetPreAllocatedOutOfMemoryError().
  mirror::Throwable* pre_allocated_OutOfMemoryError_;

  // One callee-save method per CalleeSaveType, indexed by that enum.
  mirror::ArtMethod* callee_save_methods_[kLastCalleeSaveType];

  mirror::ArtMethod* resolution_method_;

  mirror::ArtMethod* imt_conflict_method_;

  mirror::ObjectArray<mirror::ArtMethod>* default_imt_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  UniquePtr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // New flag added which tells us if the runtime has finished starting. If
  // this flag is set then the Daemon threads are created and the class loader
  // is created. This flag is needed for knowing if it's safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  bool method_trace_;
  std::string method_trace_file_;
  size_t method_trace_file_size_;
  instrumentation::Instrumentation instrumentation_;

  // Per-class-loader dex-file class paths used at compile time; keyed by the
  // class loader jobject (compared via JobjectComparator).
  typedef SafeMap<jobject, std::vector<const DexFile*>, JobjectComparator> CompileTimeClassPaths;
  CompileTimeClassPaths compile_time_class_paths_;
  bool use_compile_time_class_path_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // Balances Instrument/UninstrumentQuickAllocEntryPoints calls.
  int quick_alloc_entry_points_instrumentation_counter_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_