runtime.h revision 2cd334ae2d4287216523882f0d298cf3901b7ab1
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "arch/instruction_set.h"
#include "base/allocator.h"
#include "compiler_callbacks.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "profiler_options.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"
#include "safe_map.h"

namespace art {

namespace gc {
  class Heap;
  namespace collector {
    class GarbageCollector;
  }  // namespace collector
}  // namespace gc
namespace mirror {
  class ArtMethod;
  class ClassLoader;
  class Array;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int8_t> ByteArray;
  class String;
  class Throwable;
}  // namespace mirror
namespace verifier {
  class MethodVerifier;
}  // namespace verifier
class ClassLinker;
class Closure;
class DexFile;
class InternTable;
class JavaVMExt;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class Trace;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

// Not all combinations of flags are valid. You may not visit all roots as well as the new roots
// (no logical reason to do this). You also may not start logging new roots and stop logging new
// roots (also no logical reason to do this).
enum VisitRootFlags : uint8_t {
  kVisitRootFlagAllRoots = 0x1,
  kVisitRootFlagNewRoots = 0x2,
  kVisitRootFlagStartLoggingNewRoots = 0x4,
  kVisitRootFlagStopLoggingNewRoots = 0x8,
  kVisitRootFlagClearRootLog = 0x10,
};
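// Illustrative only, not part of this header: the flags are bit values, so a caller combines
// them and casts back to VisitRootFlags. For example, a collector that only wants the roots
// logged since the last visit, clearing the log afterwards, might call roughly:
//
//   Runtime::Current()->VisitRoots(callback, arg, static_cast<VisitRootFlags>(
//       kVisitRootFlagNewRoots | kVisitRootFlagClearRootLog));
//
// The real call sites live in the garbage collectors.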

class Runtime {
 public:
  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
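  // A minimal sketch, not part of this header, of how an embedder (e.g. JNI_CreateJavaVM) is
  // expected to drive startup; "-Xcheck:jni" is just an example option:
  //
  //   RuntimeOptions options;
  //   options.push_back(std::make_pair("-Xcheck:jni", nullptr));
  //   if (!Runtime::Create(options, /* ignore_unrecognized */ false)) {
  //     return false;
  //   }
  //   Runtime* runtime = Runtime::Current();  // The creating thread is now attached.
  //   runtime->Start();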

  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  bool CanRelocate() const {
    return !IsCompiler() || compiler_callbacks_->IsRelocationPossible();
  }

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;
  std::string GetPatchoatExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(std::string option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  const ProfilerOptions& GetProfilerOptions() const {
    return profiler_options_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
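  // A hedged sketch, not part of this header, of the bookkeeping pattern these methods support,
  // loosely mirroring native thread creation: the birth is registered under the shutdown lock so
  // the runtime cannot shut down while a partially-initialized thread exists.
  //
  //   {
  //     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  //     if (runtime->IsShuttingDownLocked()) {
  //       return;  // Too late; do not create the thread.
  //     }
  //     runtime->StartThreadBirth();
  //   }
  //   // ... create and initialize the native thread ...
  //   {
  //     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  //     runtime->EndThreadBirth();
  //   }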

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  [[noreturn]] static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
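  // Illustrative only, not part of this header: a native thread that needs to run managed code
  // attaches itself, does its work, then detaches before exiting. The thread name and group are
  // arbitrary examples.
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime->AttachCurrentThread("NativeWorker", /* as_daemon */ false,
  //                                    runtime->GetSystemThreadGroup(), /* create_peer */ true)) {
  //     // ... work that needs a Thread* / JNIEnv* ...
  //     runtime->DetachCurrentThread();
  //   }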

  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != NULL);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_;
  }

  size_t GetMaxSpinsBeforeThinkLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  void DisallowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void EnsureNewSystemWeaksDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all the roots. The flags argument (see VisitRootFlags above) selects which subsets of
  // roots are visited and how the log of newly-added roots is updated.
  void VisitRoots(RootCallback* visitor, void* arg, VisitRootFlags flags = kVisitRootFlagAllRoots)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the roots that can safely be visited concurrently with the mutators.
  void VisitConcurrentRoots(RootCallback* visitor, void* arg,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the non-thread roots; this can be done with mutators unpaused.
  void VisitNonThreadRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visit all of the thread roots.
  void VisitThreadRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Flip thread roots from from-space refs to to-space refs.
  size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
                         gc::collector::GarbageCollector* collector)
      LOCKS_EXCLUDED(Locks::mutator_lock_);

  // Visit all other roots which must be visited with mutators suspended.
  void VisitNonConcurrentRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns nullptr; otherwise the
  // system weak is updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
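  // A minimal sketch, not part of this header, of the visitor contract; IsAliveSomehow() is a
  // hypothetical stand-in for the collector's own liveness query. Collectors typically bracket
  // this with DisallowNewSystemWeaks()/AllowNewSystemWeaks() above.
  //
  //   mirror::Object* SweepVisitor(mirror::Object* obj, void* arg) {
  //     // Return nullptr to have the weak reference cleared, or the (possibly moved) object to
  //     // have the reference updated in place.
  //     return IsAliveSomehow(obj) ? obj : nullptr;
  //   }
  //   Runtime::Current()->SweepSystemWeaks(SweepVisitor, nullptr);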

  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootCallback* callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  mirror::ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasResolutionMethod() const {
    return !resolution_method_.IsNull();
  }

  void SetResolutionMethod(mirror::ArtMethod* method) {
    resolution_method_ = GcRoot<mirror::ArtMethod>(method);
  }

  mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasImtConflictMethod() const {
    return !imt_conflict_method_.IsNull();
  }

  void SetImtConflictMethod(mirror::ArtMethod* method) {
    imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
  }
  void SetImtUnimplementedMethod(mirror::ArtMethod* method) {
    imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method);
  }

  mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns an imt with every entry set to conflict, used as default imt for all classes.
  mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasDefaultImt() const {
    return !default_imt_.IsNull();
  }

  void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
    default_imt_ = GcRoot<mirror::ObjectArray<mirror::ArtMethod>>(imt);
  }

  mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that describes all callee saves being spilled to the stack.
  enum CalleeSaveType {
    kSaveAll,
    kRefsOnly,
    kRefsAndArgs,
    kLastCalleeSaveType  // Value used for iteration
  };

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return !callee_save_methods_[type].IsNull();
  }

  mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[type];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(mirror::ArtMethod* method)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);

  void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);

  mirror::ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state) LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_,
                                                      Locks::mutator_lock_);

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };
  void PreZygoteFork();
  bool InitZygote();
  void DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa);
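  // A rough sketch, not part of this header, of how the zygote fork hooks are expected to be
  // used; the "arm64" ISA string and the kInitialize choice are example values only.
  //
  //   runtime->PreZygoteFork();
  //   pid_t pid = fork();
  //   if (pid == 0) {
  //     // Child: becomes an app process and may need the native bridge set up for its ISA.
  //     runtime->DidForkFromZygote(env, Runtime::NativeBridgeAction::kInitialize, "arm64");
  //   }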

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  bool UseCompileTimeClassPath() const {
    return use_compile_time_class_path_;
  }

  void AddMethodVerifier(verifier::MethodVerifier* verifier) LOCKS_EXCLUDED(method_verifier_lock_);
  void RemoveMethodVerifier(verifier::MethodVerifier* verifier)
      LOCKS_EXCLUDED(method_verifier_lock_);

  const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);

  // The caller is responsible for ensuring the class_path DexFiles remain
  // valid as long as the Runtime object remains valid.
  void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);

  void StartProfiler(const char* profile_output_filename);
  void UpdateProfilerState(int state);

  // Transaction support.
  bool IsActiveTransaction() const {
    return preinitialization_transaction_ != nullptr;
  }
  void EnterTransactionMode(Transaction* transaction);
  void ExitTransactionMode();
  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
                                 mirror::Object* value, bool is_volatile) const;
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
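  // A hedged sketch, not part of this header, of how compile-time class pre-initialization is
  // expected to drive transaction mode; the real flow lives in the compiler driver, and
  // Transaction is only forward-declared here.
  //
  //   Transaction transaction;
  //   runtime->EnterTransactionMode(&transaction);
  //   // ... initialize the class; field, array and intern-table writes are recorded through the
  //   // Record* hooks above so they can be rolled back if initialization must be aborted ...
  //   runtime->ExitTransactionMode();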

  void SetFaultMessage(const std::string& message);
  // Only read by the signal handler; NO_THREAD_SAFETY_ANALYSIS is used to prevent lock order
  // violations with the unexpected_signal_lock_.
  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    return fault_message_;
  }

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  bool IsVerificationEnabled() const {
    return verify_;
  }

  bool RunningOnValgrind() const {
    return running_on_valgrind_;
  }

  void SetTargetSdkVersion(int32_t version) {
    target_sdk_version_ = version;
  }

  int32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(const RuntimeOptions& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void InitThreadGroups(Thread* self);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  // A pointer to the active runtime or NULL.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackgrouud = 1;

  GcRoot<mirror::ArtMethod> callee_save_methods_[kLastCalleeSaveType];
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  GcRoot<mirror::ArtMethod> resolution_method_;
  GcRoot<mirror::ArtMethod> imt_conflict_method_;
  // The unresolved method has the same behavior as the conflict method; it is used by the class
  // linker to differentiate between unfilled imt slots and conflict slots in superclasses.
  GcRoot<mirror::ArtMethod> imt_unimplemented_method_;
  GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;
  QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool dex2oat_enabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::string patchoat_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  // The number of spins before thread suspension is used to forcibly inflate the lock.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;
  std::string stack_trace_file_;

  JavaVMExt* java_vm_;

  // Fault message, printed when we get a SIGSEGV.
  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::string fault_message_ GUARDED_BY(fault_message_lock_);

  // Method verifier set, used so that we can update their GC roots.
  Mutex method_verifier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::set<verifier::MethodVerifier*> method_verifiers_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shut down but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // Tells us whether the runtime has finished starting. Once this flag is set, the daemon
  // threads and the system class loader have been created. It is needed to know whether it's
  // safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool running_on_valgrind_;

  std::string profile_output_filename_;
  ProfilerOptions profiler_options_;
  bool profiler_started_;

  bool method_trace_;
  std::string method_trace_file_;
  size_t method_trace_file_size_;
  instrumentation::Instrumentation instrumentation_;

  typedef AllocationTrackingSafeMap<jobject, std::vector<const DexFile*>,
                                    kAllocatorTagCompileTimeClassPath, JobjectComparator>
      CompileTimeClassPaths;
  CompileTimeClassPaths compile_time_class_paths_;
  bool use_compile_time_class_path_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transaction used for pre-initializing classes at compilation time.
  Transaction* preinitialization_transaction_;

  // If false, verification is disabled. True by default.
  bool verify_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. It works as follows:
  // if a standard dlopen fails to load the native library associated with a native activity, the
  // runtime calls into the native bridge to load it and then obtains the trampoline for the entry
  // into the native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty, the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_