runtime.h revision 936b37f3a7f224d990a36b2ec66782a4462180d6
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "base/macros.h"
#include "base/stringpiece.h"
#include "gc/collector_type.h"
#include "gc/heap.h"
#include "globals.h"
#include "instruction_set.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
#include "locks.h"
#include "object_callbacks.h"
#include "runtime_stats.h"
#include "safe_map.h"
40
41namespace art {
42
// Forward declarations, so this widely-included header stays light-weight.
namespace gc {
  class Heap;
}  // namespace gc
namespace mirror {
  class ArtMethod;
  class ClassLoader;
  class Array;
  // mirror::Object* appears in the transaction Record* method signatures
  // below but was not previously declared in this header; it may also arrive
  // transitively (e.g. via object_callbacks.h) — a redundant forward
  // declaration is harmless and makes the header self-contained.
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int8_t> ByteArray;
  class String;
  class Throwable;
}  // namespace mirror
namespace verifier {
  class MethodVerifier;
}  // namespace verifier
class ClassLinker;
class CompilerCallbacks;
class DexFile;
class InternTable;
struct JavaVMExt;
class MonitorList;
class MonitorPool;
class SignalCatcher;
class ThreadList;
class Trace;
class Transaction;
70
// Process-wide state of a running ART instance: parsed options, the heap,
// class linker, thread list, JNI state and the special runtime methods.
// At most one Runtime exists per process; it is reachable through
// Runtime::Current() (backed by the static instance_ member below).
class Runtime {
 public:
  // Raw configuration handed to Create(): (option name, opaque value) pairs.
  typedef std::vector<std::pair<std::string, const void*> > Options;

  // How aggressively the compiler should compile methods.
  enum CompilerFilter {
    kInterpretOnly,       // Compile nothing.
    kSpace,               // Maximize space savings.
    kBalanced,            // Try to get the best performance return on compilation investment.
    kSpeed,               // Maximize runtime performance.
    kEverything           // Force compilation (Note: excludes compilation of class initializers).
  };

  // Guide heuristics to determine whether to compile method if profile data not available.
#if ART_SMALL_MODE
  static const CompilerFilter kDefaultCompilerFilter = kInterpretOnly;
#else
  static const CompilerFilter kDefaultCompilerFilter = kSpeed;
#endif
  // Default method-size/count thresholds backing the compile heuristics
  // (presumably measured in dex code units — confirm against the compiler).
  static const size_t kDefaultHugeMethodThreshold = 10000;
  static const size_t kDefaultLargeMethodThreshold = 600;
  static const size_t kDefaultSmallMethodThreshold = 60;
  static const size_t kDefaultTinyMethodThreshold = 20;
  static const size_t kDefaultNumDexMethodsThreshold = 900;

  // Fully parsed form of an Options list; one field per recognized option.
  class ParsedOptions {
   public:
    // Returns null if there was a problem parsing and ignore_unrecognized is false.
    static ParsedOptions* Create(const Options& options, bool ignore_unrecognized);

    const std::vector<const DexFile*>* boot_class_path_;
    std::string boot_class_path_string_;
    std::string class_path_string_;
    std::string host_prefix_;
    std::string image_;
    bool check_jni_;
    std::string jni_trace_;
    CompilerCallbacks* compiler_callbacks_;
    bool is_zygote_;
    bool interpreter_only_;
    bool is_explicit_gc_disabled_;
    bool use_tlab_;
    bool verify_pre_gc_heap_;
    bool verify_post_gc_heap_;
    bool verify_pre_gc_rosalloc_;
    bool verify_post_gc_rosalloc_;
    size_t long_pause_log_threshold_;
    size_t long_gc_log_threshold_;
    bool dump_gc_performance_on_shutdown_;
    bool ignore_max_footprint_;
    size_t heap_initial_size_;
    size_t heap_maximum_size_;
    size_t heap_growth_limit_;
    size_t heap_min_free_;
    size_t heap_max_free_;
    double heap_target_utilization_;
    size_t parallel_gc_threads_;
    size_t conc_gc_threads_;
    gc::CollectorType collector_type_;
    gc::CollectorType background_collector_type_;
    size_t stack_size_;
    size_t max_spins_before_thin_lock_inflation_;
    bool low_memory_mode_;
    size_t lock_profiling_threshold_;
    std::string stack_trace_file_;
    bool method_trace_;
    std::string method_trace_file_;
    size_t method_trace_file_size_;
    // Embedder-supplied hooks; cf. the vfprintf_/exit_/abort_ members of
    // Runtime below.
    bool (*hook_is_sensitive_thread_)();
    jint (*hook_vfprintf_)(FILE* stream, const char* format, va_list ap);
    void (*hook_exit_)(jint status);
    void (*hook_abort_)();
    std::vector<std::string> properties_;
    CompilerFilter compiler_filter_;
    size_t huge_method_threshold_;
    size_t large_method_threshold_;
    size_t small_method_threshold_;
    size_t tiny_method_threshold_;
    size_t num_dex_methods_threshold_;
    bool sea_ir_mode_;
    bool profile_;
    std::string profile_output_filename_;
    // NOTE(review): declared int here but uint32_t in Runtime — confirm
    // which signedness is intended.
    int profile_period_s_;
    int profile_duration_s_;
    int profile_interval_us_;
    double profile_backoff_coefficient_;

   private:
    ParsedOptions() {}  // Instances come from Create().
  };

  // Creates and initializes a new runtime.
  static bool Create(const Options& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // True when this runtime serves the compiler, i.e. compiler callbacks were
  // installed via the options.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

#ifdef ART_SEA_IR_MODE
  bool IsSeaIRMode() const {
    return sea_ir_mode_;
  }
#endif

  // Note: unlike the getter above, this setter exists even when
  // ART_SEA_IR_MODE is not defined.
  void SetSeaIRMode(bool sea_ir_mode) {
    sea_ir_mode_ = sea_ir_mode;
  }

  CompilerFilter GetCompilerFilter() const {
    return compiler_filter_;
  }

  void SetCompilerFilter(CompilerFilter compiler_filter) {
    compiler_filter_ = compiler_filter;
  }

  // Compile-heuristic thresholds; defaults are the kDefault*Threshold
  // constants above.
  size_t GetHugeMethodThreshold() const {
    return huge_method_threshold_;
  }

  size_t GetLargeMethodThreshold() const {
    return large_method_threshold_;
  }

  size_t GetSmallMethodThreshold() const {
    return small_method_threshold_;
  }

  size_t GetTinyMethodThreshold() const {
    return tiny_method_threshold_;
  }

  size_t GetNumDexMethodsThreshold() const {
      return num_dex_methods_threshold_;
  }

  // Only valid before the runtime has started (enforced by the DCHECK); see
  // the host_prefix_ member below for what the prefix means.
  const std::string& GetHostPrefix() const {
    DCHECK(!IsStarted());
    return host_prefix_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  // Number of threads created but not yet fully initialized; see the
  // threads_being_born_ member below.
  size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  // Returns the active runtime, or NULL if none has been created.
  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  // This isn't marked ((noreturn)) because then gcc will merge multiple calls
  // in a single function together. This reduces code size slightly, but means
  // that the native stack trace we get may point at the wrong call site.
  static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  // Invokes the embedder-supplied exit_ hook (see below) with the given status.
  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  // The default stack size for managed threads created by the runtime.
  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != NULL);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_;
  }

  // NOTE: "Think" is a typo for "Thin" (thin lock); the name is kept as-is so
  // existing callers keep compiling. See max_spins_before_thin_lock_inflation_.
  size_t GetMaxSpinsBeforeThinkLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Returns the OutOfMemoryError instance allocated up front (presumably so
  // one can be thrown when further allocation is impossible — confirm in
  // runtime.cc); see pre_allocated_OutOfMemoryError_.
  mirror::Throwable* GetPreAllocatedOutOfMemoryError() const
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  const char* GetVersion() const {
    return "2.0.0";
  }

  void DisallowNewSystemWeaks() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
  void VisitRoots(RootCallback* visitor, void* arg, bool only_dirty, bool clean_dirty)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootCallback* visitor, void* arg, bool only_dirty, bool clean_dirty)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the non thread roots, we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns
  // nullptr, otherwise it is updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  mirror::ArtMethod* GetResolutionMethod() const {
    CHECK(HasResolutionMethod());
    return resolution_method_;
  }

  bool HasResolutionMethod() const {
    return resolution_method_ != NULL;
  }

  void SetResolutionMethod(mirror::ArtMethod* method) {
    resolution_method_ = method;
  }

  mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  mirror::ArtMethod* GetImtConflictMethod() const {
    CHECK(HasImtConflictMethod());
    return imt_conflict_method_;
  }

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != NULL;
  }

  void SetImtConflictMethod(mirror::ArtMethod* method) {
    imt_conflict_method_ = method;
  }

  mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns an imt with every entry set to conflict, used as default imt for all classes.
  mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt() const {
    CHECK(HasDefaultImt());
    return default_imt_;
  }

  bool HasDefaultImt() const {
    return default_imt_ != NULL;
  }

  void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
    default_imt_ = imt;
  }

  mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that describes all callee saves being spilled to the stack.
  enum CalleeSaveType {
    kSaveAll,
    kRefsOnly,
    kRefsAndArgs,
    kLastCalleeSaveType  // Value used for iteration
  };

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[type] != NULL;
  }

  mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) const {
    DCHECK(HasCalleeSaveMethod(type));
    return callee_save_methods_[type];
  }

  // Byte offset of callee_save_methods_[type] within Runtime. This bakes the
  // member layout below into callers, so keep the field order stable.
  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
  }

  void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);

  mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet instruction_set,
                                                 CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Runtime statistics; the kind/kinds values come from runtime_stats.h.
  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state);

  // Zygote support: hooks run around fork().
  bool PreZygoteFork();
  bool InitZygote();
  void DidForkFromZygote();

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  bool UseCompileTimeClassPath() const {
    return use_compile_time_class_path_;
  }

  // Registration of live method verifiers so their GC roots can be updated;
  // see method_verifiers_ below.
  // NOTE(review): these annotations name "method_verifier_lock_" (singular)
  // but the member below is "method_verifiers_lock_" — confirm which name is
  // intended.
  void AddMethodVerifier(verifier::MethodVerifier* verifier) LOCKS_EXCLUDED(method_verifier_lock_);
  void RemoveMethodVerifier(verifier::MethodVerifier* verifier)
      LOCKS_EXCLUDED(method_verifier_lock_);

  const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
  void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);

  void StartProfiler(const char *appDir, bool startImmediately = false);

  // Transaction support. The Record* methods log heap writes and intern-table
  // changes (presumably so the active transaction can undo them on rollback —
  // confirm in transaction.h).
  bool IsActiveTransaction() const;
  void EnterTransactionMode(Transaction* transaction);
  void ExitTransactionMode();
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
                                 mirror::Object* value, bool is_volatile) const;
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(mirror::String* s, uint32_t hash_code) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(mirror::String* s, uint32_t hash_code) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(mirror::String* s, uint32_t hash_code) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);

 private:
  static void InitPlatformSignalHandlers();

  Runtime();  // Instances come from Create().

  void BlockSignals();

  bool Init(const Options& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void InitThreadGroups(Thread* self);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  // A pointer to the active runtime or NULL.
  static Runtime* instance_;

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;

  CompilerFilter compiler_filter_;
  size_t huge_method_threshold_;
  size_t large_method_threshold_;
  size_t small_method_threshold_;
  size_t tiny_method_threshold_;
  size_t num_dex_methods_threshold_;

  bool sea_ir_mode_;

  // The host prefix is used during cross compilation. It is removed
  // from the start of host paths such as:
  //    $ANDROID_PRODUCT_OUT/system/framework/boot.oat
  // to produce target paths such as
  //    /system/framework/boot.oat
  // Similarly it is prepended to target paths to arrive back at a
  // host path. In both cases this is necessary because image and oat
  // files embedded expect paths of dependent files (an image points
  // to an oat file and an oat file to one or more dex files). These
  // files contain the expected target path.
  std::string host_prefix_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;
  std::string stack_trace_file_;

  JavaVMExt* java_vm_;

  mirror::Throwable* pre_allocated_OutOfMemoryError_;

  // NOTE: GetCalleeSaveMethodOffset() exposes this array's offset within
  // Runtime to callers; keep its position in the layout stable.
  mirror::ArtMethod* callee_save_methods_[kLastCalleeSaveType];

  mirror::ArtMethod* resolution_method_;

  mirror::ArtMethod* imt_conflict_method_;

  mirror::ObjectArray<mirror::ArtMethod>* default_imt_;

  // Method verifier set, used so that we can update their GC roots.
  Mutex method_verifiers_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::set<verifier::MethodVerifier*> method_verifiers_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  UniquePtr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // New flag added which tells us if the runtime has finished starting. If
  // this flag is set then the Daemon threads are created and the class loader
  // is created. This flag is needed for knowing if it's safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM.
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  // Runtime profile support.
  bool profile_;
  std::string profile_output_filename_;
  uint32_t profile_period_s_;           // Generate profile every n seconds.
  uint32_t profile_duration_s_;         // Run profile for n seconds.
  uint32_t profile_interval_us_;        // Microseconds between samples.
  double profile_backoff_coefficient_;  // Coefficient to exponential backoff.

  bool method_trace_;
  std::string method_trace_file_;
  size_t method_trace_file_size_;
  instrumentation::Instrumentation instrumentation_;

  typedef SafeMap<jobject, std::vector<const DexFile*>, JobjectComparator> CompileTimeClassPaths;
  CompileTimeClassPaths compile_time_class_paths_;
  bool use_compile_time_class_path_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transaction used for pre-initializing classes at compilation time.
  // TODO: rename to preinitialization_transaction_ (trailing underscore) to
  // match the member naming convention; requires updating runtime.cc.
  Transaction* preinitialization_transaction;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};
647
648}  // namespace art
649
650#endif  // ART_RUNTIME_RUNTIME_H_
651