runtime.h revision e6da9af8dfe0a3e3fbc2be700554f6478380e7b9
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#ifndef ART_RUNTIME_RUNTIME_H_
18#define ART_RUNTIME_RUNTIME_H_
19
#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "base/macros.h"
#include "base/stringpiece.h"
#include "gc/collector_type.h"
#include "gc/heap.h"
#include "globals.h"
#include "instruction_set.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
#include "locks.h"
#include "root_visitor.h"
#include "runtime_stats.h"
#include "safe_map.h"
40
41namespace art {
42
// Forward declarations only — keeps this widely-included header light.
namespace gc {
  class Heap;
}  // namespace gc
namespace mirror {
  class ArtMethod;
  class ClassLoader;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int8_t> ByteArray;
  class String;
  class Throwable;
}  // namespace mirror
namespace verifier {
  class MethodVerifier;
}  // namespace verifier
class ClassLinker;
class CompilerCallbacks;
class DexFile;
class InternTable;
struct JavaVMExt;
class MonitorList;
class SignalCatcher;
class ThreadList;
class Trace;
67
68class Runtime {
69 public:
70  typedef std::vector<std::pair<std::string, const void*> > Options;
71
72  enum CompilerFilter {
73    kInterpretOnly,       // Compile nothing.
74    kSpace,               // Maximize space savings.
75    kBalanced,            // Try to get the best performance return on compilation investment.
76    kSpeed,               // Maximize runtime performance.
77    kEverything           // Force compilation (Note: excludes compilaton of class initializers).
78  };
79
80  // Guide heuristics to determine whether to compile method if profile data not available.
81#if ART_SMALL_MODE
82  static const CompilerFilter kDefaultCompilerFilter = kInterpretOnly;
83#else
84  static const CompilerFilter kDefaultCompilerFilter = kSpeed;
85#endif
86  static const size_t kDefaultHugeMethodThreshold = 10000;
87  static const size_t kDefaultLargeMethodThreshold = 600;
88  static const size_t kDefaultSmallMethodThreshold = 60;
89  static const size_t kDefaultTinyMethodThreshold = 20;
90  static const size_t kDefaultNumDexMethodsThreshold = 900;
91
92  class ParsedOptions {
93   public:
94    // returns null if problem parsing and ignore_unrecognized is false
95    static ParsedOptions* Create(const Options& options, bool ignore_unrecognized);
96
97    const std::vector<const DexFile*>* boot_class_path_;
98    std::string boot_class_path_string_;
99    std::string class_path_string_;
100    std::string host_prefix_;
101    std::string image_;
102    bool check_jni_;
103    std::string jni_trace_;
104    CompilerCallbacks* compiler_callbacks_;
105    bool is_zygote_;
106    bool interpreter_only_;
107    bool is_explicit_gc_disabled_;
108    bool use_tlab_;
109    size_t long_pause_log_threshold_;
110    size_t long_gc_log_threshold_;
111    bool dump_gc_performance_on_shutdown_;
112    bool ignore_max_footprint_;
113    size_t heap_initial_size_;
114    size_t heap_maximum_size_;
115    size_t heap_growth_limit_;
116    size_t heap_min_free_;
117    size_t heap_max_free_;
118    double heap_target_utilization_;
119    size_t parallel_gc_threads_;
120    size_t conc_gc_threads_;
121    gc::CollectorType collector_type_;
122    gc::CollectorType background_collector_type_;
123    size_t stack_size_;
124    size_t max_spins_before_thin_lock_inflation_;
125    bool low_memory_mode_;
126    size_t lock_profiling_threshold_;
127    std::string stack_trace_file_;
128    bool method_trace_;
129    std::string method_trace_file_;
130    size_t method_trace_file_size_;
131    bool (*hook_is_sensitive_thread_)();
132    jint (*hook_vfprintf_)(FILE* stream, const char* format, va_list ap);
133    void (*hook_exit_)(jint status);
134    void (*hook_abort_)();
135    std::vector<std::string> properties_;
136    CompilerFilter compiler_filter_;
137    size_t huge_method_threshold_;
138    size_t large_method_threshold_;
139    size_t small_method_threshold_;
140    size_t tiny_method_threshold_;
141    size_t num_dex_methods_threshold_;
142    bool sea_ir_mode_;
143    bool profile_;
144    std::string profile_output_filename_;
145    int profile_period_s_;
146    int profile_duration_s_;
147    int profile_interval_us_;
148    double profile_backoff_coefficient_;
149
150   private:
151    ParsedOptions() {}
152  };
153
154  // Creates and initializes a new runtime.
155  static bool Create(const Options& options, bool ignore_unrecognized)
156      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
157
158  bool IsCompiler() const {
159    return compiler_callbacks_ != nullptr;
160  }
161
162  CompilerCallbacks* GetCompilerCallbacks() {
163    return compiler_callbacks_;
164  }
165
166  bool IsZygote() const {
167    return is_zygote_;
168  }
169
170  bool IsExplicitGcDisabled() const {
171    return is_explicit_gc_disabled_;
172  }
173
174#ifdef ART_SEA_IR_MODE
175  bool IsSeaIRMode() const {
176    return sea_ir_mode_;
177  }
178#endif
179
180  void SetSeaIRMode(bool sea_ir_mode) {
181    sea_ir_mode_ = sea_ir_mode;
182  }
183
184  CompilerFilter GetCompilerFilter() const {
185    return compiler_filter_;
186  }
187
188  void SetCompilerFilter(CompilerFilter compiler_filter) {
189    compiler_filter_ = compiler_filter;
190  }
191
192  size_t GetHugeMethodThreshold() const {
193    return huge_method_threshold_;
194  }
195
196  size_t GetLargeMethodThreshold() const {
197    return large_method_threshold_;
198  }
199
200  size_t GetSmallMethodThreshold() const {
201    return small_method_threshold_;
202  }
203
204  size_t GetTinyMethodThreshold() const {
205    return tiny_method_threshold_;
206  }
207
208  size_t GetNumDexMethodsThreshold() const {
209      return num_dex_methods_threshold_;
210  }
211
212  const std::string& GetHostPrefix() const {
213    DCHECK(!IsStarted());
214    return host_prefix_;
215  }
216
217  // Starts a runtime, which may cause threads to be started and code to run.
218  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
219
220  bool IsShuttingDown(Thread* self);
221  bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
222    return shutting_down_;
223  }
224
225  size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
226    return threads_being_born_;
227  }
228
229  void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
230    threads_being_born_++;
231  }
232
233  void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
234
235  bool IsStarted() const {
236    return started_;
237  }
238
239  bool IsFinishedStarting() const {
240    return finished_starting_;
241  }
242
243  static Runtime* Current() {
244    return instance_;
245  }
246
247  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
248  // callers should prefer.
249  // This isn't marked ((noreturn)) because then gcc will merge multiple calls
250  // in a single function together. This reduces code size slightly, but means
251  // that the native stack trace we get may point at the wrong call site.
252  static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);
253
254  // Returns the "main" ThreadGroup, used when attaching user threads.
255  jobject GetMainThreadGroup() const;
256
257  // Returns the "system" ThreadGroup, used when attaching our internal threads.
258  jobject GetSystemThreadGroup() const;
259
260  // Returns the system ClassLoader which represents the CLASSPATH.
261  jobject GetSystemClassLoader() const;
262
263  // Attaches the calling native thread to the runtime.
264  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
265                           bool create_peer);
266
267  void CallExitHook(jint status);
268
269  // Detaches the current native thread from the runtime.
270  void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
271
272  void DumpForSigQuit(std::ostream& os)
273      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
274  void DumpLockHolders(std::ostream& os);
275
276  ~Runtime();
277
278  const std::string& GetBootClassPathString() const {
279    return boot_class_path_string_;
280  }
281
282  const std::string& GetClassPathString() const {
283    return class_path_string_;
284  }
285
286  ClassLinker* GetClassLinker() const {
287    return class_linker_;
288  }
289
290  size_t GetDefaultStackSize() const {
291    return default_stack_size_;
292  }
293
294  gc::Heap* GetHeap() const {
295    return heap_;
296  }
297
298  InternTable* GetInternTable() const {
299    DCHECK(intern_table_ != NULL);
300    return intern_table_;
301  }
302
303  JavaVMExt* GetJavaVM() const {
304    return java_vm_;
305  }
306
307  size_t GetMaxSpinsBeforeThinkLockInflation() const {
308    return max_spins_before_thin_lock_inflation_;
309  }
310
311  MonitorList* GetMonitorList() const {
312    return monitor_list_;
313  }
314
315  mirror::Throwable* GetPreAllocatedOutOfMemoryError() const
316    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
317
318  const std::vector<std::string>& GetProperties() const {
319    return properties_;
320  }
321
322  ThreadList* GetThreadList() const {
323    return thread_list_;
324  }
325
326  const char* GetVersion() const {
327    return "2.0.0";
328  }
329
330  void DisallowNewSystemWeaks() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
331  void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
332
333  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
334  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
335  void VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty)
336      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
337
338  // Visit all of the roots we can do safely do concurrently.
339  void VisitConcurrentRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty)
340      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
341
342  // Visit all of the non thread roots, we can do this with mutators unpaused.
343  void VisitNonThreadRoots(RootVisitor* visitor, void* arg)
344      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
345
346  // Visit all other roots which must be done with mutators suspended.
347  void VisitNonConcurrentRoots(RootVisitor* visitor, void* arg)
348      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
349
350  // Sweep system weaks, the system weak is deleted if the visitor return nullptr. Otherwise, the
351  // system weak is updated to be the visitor's returned value.
352  void SweepSystemWeaks(RootVisitor* visitor, void* arg)
353      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
354
355  // Returns a special method that calls into a trampoline for runtime method resolution
356  mirror::ArtMethod* GetResolutionMethod() const {
357    CHECK(HasResolutionMethod());
358    return resolution_method_;
359  }
360
361  bool HasResolutionMethod() const {
362    return resolution_method_ != NULL;
363  }
364
365  void SetResolutionMethod(mirror::ArtMethod* method) {
366    resolution_method_ = method;
367  }
368
369  mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
370
371  // Returns a special method that calls into a trampoline for runtime imt conflicts
372  mirror::ArtMethod* GetImtConflictMethod() const {
373    CHECK(HasImtConflictMethod());
374    return imt_conflict_method_;
375  }
376
377  bool HasImtConflictMethod() const {
378    return imt_conflict_method_ != NULL;
379  }
380
381  void SetImtConflictMethod(mirror::ArtMethod* method) {
382    imt_conflict_method_ = method;
383  }
384
385  mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
386
387  // Returns an imt with every entry set to conflict, used as default imt for all classes.
388  mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt() const {
389    CHECK(HasDefaultImt());
390    return default_imt_;
391  }
392
393  bool HasDefaultImt() const {
394    return default_imt_ != NULL;
395  }
396
397  void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
398    default_imt_ = imt;
399  }
400
401  mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
402      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
403
404  // Returns a special method that describes all callee saves being spilled to the stack.
405  enum CalleeSaveType {
406    kSaveAll,
407    kRefsOnly,
408    kRefsAndArgs,
409    kLastCalleeSaveType  // Value used for iteration
410  };
411
412  bool HasCalleeSaveMethod(CalleeSaveType type) const {
413    return callee_save_methods_[type] != NULL;
414  }
415
416  mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) const {
417    DCHECK(HasCalleeSaveMethod(type));
418    return callee_save_methods_[type];
419  }
420
421  void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);
422
423  mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet instruction_set,
424                                                 CalleeSaveType type)
425      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
426
427  mirror::ArtMethod* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set)
428      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
429
430  mirror::ArtMethod* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set)
431      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
432
433  int32_t GetStat(int kind);
434
435  RuntimeStats* GetStats() {
436    return &stats_;
437  }
438
439  bool HasStatsEnabled() const {
440    return stats_enabled_;
441  }
442
443  void ResetStats(int kinds);
444
445  void SetStatsEnabled(bool new_state);
446
447  bool PreZygoteFork();
448  bool InitZygote();
449  void DidForkFromZygote();
450
451  instrumentation::Instrumentation* GetInstrumentation() {
452    return &instrumentation_;
453  }
454
455  bool UseCompileTimeClassPath() const {
456    return use_compile_time_class_path_;
457  }
458
459  void AddMethodVerifier(verifier::MethodVerifier* verifier) LOCKS_EXCLUDED(method_verifier_lock_);
460  void RemoveMethodVerifier(verifier::MethodVerifier* verifier)
461      LOCKS_EXCLUDED(method_verifier_lock_);
462
463  const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
464  void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);
465
466  void StartProfiler(const char *appDir, bool startImmediately = false);
467
468 private:
469  static void InitPlatformSignalHandlers();
470
471  Runtime();
472
473  void BlockSignals();
474
475  bool Init(const Options& options, bool ignore_unrecognized)
476      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
477  void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
478  void InitThreadGroups(Thread* self);
479  void RegisterRuntimeNativeMethods(JNIEnv* env);
480
481  void StartDaemonThreads();
482  void StartSignalCatcher();
483
484  // A pointer to the active runtime or NULL.
485  static Runtime* instance_;
486
487  CompilerCallbacks* compiler_callbacks_;
488  bool is_zygote_;
489  bool is_concurrent_gc_enabled_;
490  bool is_explicit_gc_disabled_;
491
492  CompilerFilter compiler_filter_;
493  size_t huge_method_threshold_;
494  size_t large_method_threshold_;
495  size_t small_method_threshold_;
496  size_t tiny_method_threshold_;
497  size_t num_dex_methods_threshold_;
498
499  bool sea_ir_mode_;
500
501  // The host prefix is used during cross compilation. It is removed
502  // from the start of host paths such as:
503  //    $ANDROID_PRODUCT_OUT/system/framework/boot.oat
504  // to produce target paths such as
505  //    /system/framework/boot.oat
506  // Similarly it is prepended to target paths to arrive back at a
507  // host past. In both cases this is necessary because image and oat
508  // files embedded expect paths of dependent files (an image points
509  // to an oat file and an oat files to one or more dex files). These
510  // files contain the expected target path.
511  std::string host_prefix_;
512
513  std::string boot_class_path_string_;
514  std::string class_path_string_;
515  std::vector<std::string> properties_;
516
517  // The default stack size for managed threads created by the runtime.
518  size_t default_stack_size_;
519
520  gc::Heap* heap_;
521
522  // The number of spins that are done before thread suspension is used to forcibly inflate.
523  size_t max_spins_before_thin_lock_inflation_;
524  MonitorList* monitor_list_;
525
526  ThreadList* thread_list_;
527
528  InternTable* intern_table_;
529
530  ClassLinker* class_linker_;
531
532  SignalCatcher* signal_catcher_;
533  std::string stack_trace_file_;
534
535  JavaVMExt* java_vm_;
536
537  mirror::Throwable* pre_allocated_OutOfMemoryError_;
538
539  mirror::ArtMethod* callee_save_methods_[kLastCalleeSaveType];
540
541  mirror::ArtMethod* resolution_method_;
542
543  mirror::ArtMethod* imt_conflict_method_;
544
545  mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
546
547  // Method verifier set, used so that we can update their GC roots.
548  Mutex method_verifiers_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
549  std::set<verifier::MethodVerifier*> method_verifiers_;
550
551  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
552  // the shutdown lock so that threads aren't born while we're shutting down.
553  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);
554
555  // Waited upon until no threads are being born.
556  UniquePtr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);
557
558  // Set when runtime shutdown is past the point that new threads may attach.
559  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);
560
561  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
562  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);
563
564  bool started_;
565
566  // New flag added which tells us if the runtime has finished starting. If
567  // this flag is set then the Daemon threads are created and the class loader
568  // is created. This flag is needed for knowing if its safe to request CMS.
569  bool finished_starting_;
570
571  // Hooks supported by JNI_CreateJavaVM
572  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
573  void (*exit_)(jint status);
574  void (*abort_)();
575
576  bool stats_enabled_;
577  RuntimeStats stats_;
578
579  // Runtime profile support.
580  bool profile_;
581  std::string profile_output_filename_;
582  uint32_t profile_period_s_;                  // Generate profile every n seconds.
583  uint32_t profile_duration_s_;                // Run profile for n seconds.
584  uint32_t profile_interval_us_;                // Microseconds between samples.
585  double profile_backoff_coefficient_;  // Coefficient to exponential backoff.
586
587  bool method_trace_;
588  std::string method_trace_file_;
589  size_t method_trace_file_size_;
590  instrumentation::Instrumentation instrumentation_;
591
592  typedef SafeMap<jobject, std::vector<const DexFile*>, JobjectComparator> CompileTimeClassPaths;
593  CompileTimeClassPaths compile_time_class_paths_;
594  bool use_compile_time_class_path_;
595
596  jobject main_thread_group_;
597  jobject system_thread_group_;
598
599  // As returned by ClassLoader.getSystemClassLoader().
600  jobject system_class_loader_;
601
602  // If true, then we dump the GC cumulative timings on shutdown.
603  bool dump_gc_performance_on_shutdown_;
604
605  DISALLOW_COPY_AND_ASSIGN(Runtime);
606};
607
608}  // namespace art
609
610#endif  // ART_RUNTIME_RUNTIME_H_
611