runtime.h revision 692fafd9778141fa6ef0048c9569abd7ee0253bf
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_RUNTIME_H_
18#define ART_RUNTIME_RUNTIME_H_
19
20#include <jni.h>
21#include <stdio.h>
22
23#include <iosfwd>
24#include <string>
25#include <utility>
26#include <vector>
27
28#include "base/macros.h"
29#include "base/stringpiece.h"
30#include "gc/collector_type.h"
31#include "gc/heap.h"
32#include "globals.h"
33#include "instruction_set.h"
34#include "instrumentation.h"
35#include "jobject_comparator.h"
36#include "locks.h"
37#include "root_visitor.h"
38#include "runtime_stats.h"
39#include "safe_map.h"
40
41namespace art {
42
43namespace gc {
44  class Heap;
45}
46namespace mirror {
47  class ArtMethod;
48  class ClassLoader;
49  template<class T> class ObjectArray;
50  template<class T> class PrimitiveArray;
51  typedef PrimitiveArray<int8_t> ByteArray;
52  class String;
53  class Throwable;
54}  // namespace mirror
55namespace verifier {
56class MethodVerifier;
57}
58class ClassLinker;
59class DexFile;
60class InternTable;
61struct JavaVMExt;
62class MonitorList;
63class SignalCatcher;
64class ThreadList;
65class Trace;
66
class Runtime {
 public:
  // Start-up options as (key, extra-info) pairs; for most options the value is
  // encoded in the key string itself and the second element is null.
  typedef std::vector<std::pair<std::string, const void*> > Options;

  // How aggressively the compiler driver compiles methods ahead of time.
  enum CompilerFilter {
    kInterpretOnly,       // Compile nothing.
    kSpace,               // Maximize space savings.
    kBalanced,            // Try to get the best performance return on compilation investment.
    kSpeed,               // Maximize runtime performance.
    kEverything           // Force compilation (Note: excludes compilation of class initializers).
  };

  // Guide heuristics to determine whether to compile method if profile data not available.
#if ART_SMALL_MODE
  static const CompilerFilter kDefaultCompilerFilter = kInterpretOnly;
#else
  static const CompilerFilter kDefaultCompilerFilter = kSpeed;
#endif
  // Default method-size thresholds used by the compilation heuristics; overridable
  // via runtime options (see the corresponding *_threshold_ fields below).
  static const size_t kDefaultHugeMethodThreshold = 10000;
  static const size_t kDefaultLargeMethodThreshold = 600;
  static const size_t kDefaultSmallMethodThreshold = 60;
  static const size_t kDefaultTinyMethodThreshold = 20;
  static const size_t kDefaultNumDexMethodsThreshold = 900;

  // Plain-data holder for the result of parsing runtime Options. Instances are
  // only produced by Create() (the constructor is private).
  class ParsedOptions {
   public:
    // returns null if problem parsing and ignore_unrecognized is false
    static ParsedOptions* Create(const Options& options, bool ignore_unrecognized);

    const std::vector<const DexFile*>* boot_class_path_;
    std::string boot_class_path_string_;
    std::string class_path_string_;
    std::string host_prefix_;
    std::string image_;
    bool check_jni_;
    std::string jni_trace_;
    bool is_compiler_;
    bool is_zygote_;
    bool interpreter_only_;
    bool is_explicit_gc_disabled_;
    bool use_tlab_;
    // GC logging thresholds (pause / total GC duration).
    size_t long_pause_log_threshold_;
    size_t long_gc_log_threshold_;
    bool dump_gc_performance_on_shutdown_;
    bool ignore_max_footprint_;
    // Heap sizing parameters, forwarded to gc::Heap.
    size_t heap_initial_size_;
    size_t heap_maximum_size_;
    size_t heap_growth_limit_;
    size_t heap_min_free_;
    size_t heap_max_free_;
    double heap_target_utilization_;
    size_t parallel_gc_threads_;
    size_t conc_gc_threads_;
    gc::CollectorType collector_type_;
    size_t stack_size_;
    size_t max_spins_before_thin_lock_inflation_;
    bool low_memory_mode_;
    size_t lock_profiling_threshold_;
    std::string stack_trace_file_;
    bool method_trace_;
    std::string method_trace_file_;
    size_t method_trace_file_size_;
    // Hooks supplied by the embedder through JNI_CreateJavaVM; may be null.
    bool (*hook_is_sensitive_thread_)();
    jint (*hook_vfprintf_)(FILE* stream, const char* format, va_list ap);
    void (*hook_exit_)(jint status);
    void (*hook_abort_)();
    std::vector<std::string> properties_;
    CompilerFilter compiler_filter_;
    size_t huge_method_threshold_;
    size_t large_method_threshold_;
    size_t small_method_threshold_;
    size_t tiny_method_threshold_;
    size_t num_dex_methods_threshold_;
    bool sea_ir_mode_;

   private:
    ParsedOptions() {}
  };

  // Creates and initializes a new runtime.
  static bool Create(const Options& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  bool IsCompiler() const {
    return is_compiler_;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  // NOTE(review): only the getter is guarded by ART_SEA_IR_MODE; the setter and
  // the sea_ir_mode_ field below are compiled unconditionally — confirm this
  // asymmetry is intentional.
#ifdef ART_SEA_IR_MODE
  bool IsSeaIRMode() const {
    return sea_ir_mode_;
  }
#endif

  void SetSeaIRMode(bool sea_ir_mode) {
    sea_ir_mode_ = sea_ir_mode;
  }

  CompilerFilter GetCompilerFilter() const {
    return compiler_filter_;
  }

  void SetCompilerFilter(CompilerFilter compiler_filter) {
    compiler_filter_ = compiler_filter;
  }

  // Method-size thresholds (see kDefault* constants above) consulted by the
  // compiler's "should I compile this method?" heuristics.
  size_t GetHugeMethodThreshold() const {
    return huge_method_threshold_;
  }

  size_t GetLargeMethodThreshold() const {
    return large_method_threshold_;
  }

  size_t GetSmallMethodThreshold() const {
    return small_method_threshold_;
  }

  size_t GetTinyMethodThreshold() const {
    return tiny_method_threshold_;
  }

  size_t GetNumDexMethodsThreshold() const {
      return num_dex_methods_threshold_;
  }

  // Only meaningful before the runtime starts (used during cross compilation);
  // see the host_prefix_ field below for the full story.
  const std::string& GetHostPrefix() const {
    DCHECK(!IsStarted());
    return host_prefix_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  // Bracket thread creation so that shutdown waits for in-flight births
  // (see threads_being_born_ / shutdown_cond_ below).
  void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  // Returns the active runtime, or null if none has been created yet.
  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  // This isn't marked ((noreturn)) because then gcc will merge multiple calls
  // in a single function together. This reduces code size slightly, but means
  // that the native stack trace we get may point at the wrong call site.
  static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  // Invokes the embedder-supplied exit hook (if any) before process exit.
  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != NULL);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_;
  }

  // NOTE(review): "Think" is a typo for "Thin" (thin-lock inflation), but the
  // name is part of the public interface — renaming would break callers.
  size_t GetMaxSpinsBeforeThinkLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  // Pre-allocated so it can be thrown even when the heap is exhausted.
  mirror::Throwable* GetPreAllocatedOutOfMemoryError() const
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  const char* GetVersion() const {
    return "2.0.0";
  }

  // Pause/resume creation of new system weak references (e.g. interned strings,
  // monitors) around GC phases that sweep them.
  void DisallowNewSystemWeaks() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
  void VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the non thread roots, we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweep system weaks, the system weak is deleted if the visitor returns nullptr. Otherwise,
  // the system weak is updated to be the visitor's returned value.
  void SweepSystemWeaks(RootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  // CHECK-fails if the method has not been set yet (see HasResolutionMethod()).
  mirror::ArtMethod* GetResolutionMethod() const {
    CHECK(HasResolutionMethod());
    return resolution_method_;
  }

  bool HasResolutionMethod() const {
    return resolution_method_ != NULL;
  }

  void SetResolutionMethod(mirror::ArtMethod* method) {
    resolution_method_ = method;
  }

  mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  // CHECK-fails if the method has not been set yet (see HasImtConflictMethod()).
  mirror::ArtMethod* GetImtConflictMethod() const {
    CHECK(HasImtConflictMethod());
    return imt_conflict_method_;
  }

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != NULL;
  }

  void SetImtConflictMethod(mirror::ArtMethod* method) {
    imt_conflict_method_ = method;
  }

  mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns an imt with every entry set to conflict, used as default imt for all classes.
  // CHECK-fails if the table has not been set yet (see HasDefaultImt()).
  mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt() const {
    CHECK(HasDefaultImt());
    return default_imt_;
  }

  bool HasDefaultImt() const {
    return default_imt_ != NULL;
  }

  void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
    default_imt_ = imt;
  }

  mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that describes all callee saves being spilled to the stack.
  enum CalleeSaveType {
    kSaveAll,
    kRefsOnly,
    kRefsAndArgs,
    kLastCalleeSaveType  // Value used for iteration
  };

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[type] != NULL;
  }

  mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) const {
    DCHECK(HasCalleeSaveMethod(type));
    return callee_save_methods_[type];
  }

  void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);

  mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet instruction_set,
                                                 CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Runtime statistics (allocation counts etc.); kind/kinds values are defined
  // with RuntimeStats in runtime_stats.h.
  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state);

  // Zygote lifecycle: prepare for fork, set up the zygote process, and
  // re-initialize runtime state in the child after forking.
  bool PreZygoteFork();
  bool InitZygote();
  void DidForkFromZygote();

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  bool UseCompileTimeClassPath() const {
    return use_compile_time_class_path_;
  }

  // Register/unregister live verifiers so their GC roots can be updated
  // (guarded by method_verifiers_lock_; see fields below).
  void AddMethodVerifier(verifier::MethodVerifier* verifier);
  void RemoveMethodVerifier(verifier::MethodVerifier* verifier);

  const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
  void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(const Options& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void InitThreadGroups(Thread* self);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  // A pointer to the active runtime or NULL.
  static Runtime* instance_;

  bool is_compiler_;
  bool is_zygote_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;

  // Compilation heuristics configuration (see the public getters above).
  CompilerFilter compiler_filter_;
  size_t huge_method_threshold_;
  size_t large_method_threshold_;
  size_t small_method_threshold_;
  size_t tiny_method_threshold_;
  size_t num_dex_methods_threshold_;

  bool sea_ir_mode_;

  // The host prefix is used during cross compilation. It is removed
  // from the start of host paths such as:
  //    $ANDROID_PRODUCT_OUT/system/framework/boot.oat
  // to produce target paths such as
  //    /system/framework/boot.oat
  // Similarly it is prepended to target paths to arrive back at a
  // host path. In both cases this is necessary because the paths
  // embedded in image and oat files are target paths: an image points
  // to an oat file and an oat file to one or more dex files, and those
  // references contain the expected target path.
  std::string host_prefix_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;
  std::string stack_trace_file_;

  JavaVMExt* java_vm_;

  // Pre-allocated OOME instance, throwable even when allocation is impossible.
  mirror::Throwable* pre_allocated_OutOfMemoryError_;

  // Indexed by CalleeSaveType.
  mirror::ArtMethod* callee_save_methods_[kLastCalleeSaveType];

  mirror::ArtMethod* resolution_method_;

  mirror::ArtMethod* imt_conflict_method_;

  mirror::ObjectArray<mirror::ArtMethod>* default_imt_;

  // Method verifier set, used so that we can update their GC roots.
  // NOTE(review): std::set is used here without a visible #include <set> in this
  // header — presumably pulled in transitively; confirm and include it directly.
  Mutex method_verifiers_lock_;
  std::set<verifier::MethodVerifier*> method_verifiers_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  UniquePtr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // New flag added which tells us if the runtime has finished starting. If
  // this flag is set then the Daemon threads are created and the class loader
  // is created. This flag is needed for knowing if its safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  // Method tracing configuration (-Xmethod-trace* options).
  bool method_trace_;
  std::string method_trace_file_;
  size_t method_trace_file_size_;
  instrumentation::Instrumentation instrumentation_;

  // Per-class-loader dex file lists used when running as a compiler.
  typedef SafeMap<jobject, std::vector<const DexFile*>, JobjectComparator> CompileTimeClassPaths;
  CompileTimeClassPaths compile_time_class_paths_;
  bool use_compile_time_class_path_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};
584
585}  // namespace art
586
587#endif  // ART_RUNTIME_RUNTIME_H_
588