// runtime.h revision cade5c3c75588da6d873df727acdaf3378a66efa
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <atomic>
#include <iosfwd>
#include <list>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "dex_file_types.h"
#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

44namespace art {
45
46namespace gc {
47  class AbstractSystemWeakHolder;
48  class Heap;
49}  // namespace gc
50
51namespace jit {
52  class Jit;
53  class JitOptions;
54}  // namespace jit
55
56namespace mirror {
57  class Array;
58  class ClassLoader;
59  class DexCache;
60  template<class T> class ObjectArray;
61  template<class T> class PrimitiveArray;
62  typedef PrimitiveArray<int8_t> ByteArray;
63  class String;
64  class Throwable;
65}  // namespace mirror
66namespace ti {
67  class Agent;
68}  // namespace ti
69namespace verifier {
70  class MethodVerifier;
71  enum class VerifyMode : int8_t;
72}  // namespace verifier
73class ArenaPool;
74class ArtMethod;
75enum class CalleeSaveType: uint32_t;
76class ClassHierarchyAnalysis;
77class ClassLinker;
78class CompilerCallbacks;
79class DexFile;
80class InternTable;
81class IsMarkedVisitor;
82class JavaVMExt;
83class LinearAlloc;
84class MemMap;
85class MonitorList;
86class MonitorPool;
87class NullPointerHandler;
88class OatFileManager;
89class Plugin;
90struct RuntimeArgumentMap;
91class RuntimeCallbacks;
92class SignalCatcher;
93class StackOverflowHandler;
94class SuspensionHandler;
95class ThreadList;
96class Trace;
97struct TraceConfig;
98class Transaction;
99
100typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
101
102class Runtime {
103 public:
104  // Parse raw runtime options.
105  static bool ParseOptions(const RuntimeOptions& raw_options,
106                           bool ignore_unrecognized,
107                           RuntimeArgumentMap* runtime_options);
108
109  // Creates and initializes a new runtime.
110  static bool Create(RuntimeArgumentMap&& runtime_options)
111      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
112
113  // Creates and initializes a new runtime.
114  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
115      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
116
117  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
118  bool IsAotCompiler() const {
119    return !UseJitCompilation() && IsCompiler();
120  }
121
122  // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
123  bool IsCompiler() const {
124    return compiler_callbacks_ != nullptr;
125  }
126
127  // If a compiler, are we compiling a boot image?
128  bool IsCompilingBootImage() const;
129
130  bool CanRelocate() const;
131
132  bool ShouldRelocate() const {
133    return must_relocate_ && CanRelocate();
134  }
135
136  bool MustRelocateIfPossible() const {
137    return must_relocate_;
138  }
139
140  bool IsDex2OatEnabled() const {
141    return dex2oat_enabled_ && IsImageDex2OatEnabled();
142  }
143
144  bool IsImageDex2OatEnabled() const {
145    return image_dex2oat_enabled_;
146  }
147
148  CompilerCallbacks* GetCompilerCallbacks() {
149    return compiler_callbacks_;
150  }
151
152  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
153    CHECK(callbacks != nullptr);
154    compiler_callbacks_ = callbacks;
155  }
156
157  bool IsZygote() const {
158    return is_zygote_;
159  }
160
161  bool IsExplicitGcDisabled() const {
162    return is_explicit_gc_disabled_;
163  }
164
165  std::string GetCompilerExecutable() const;
166  std::string GetPatchoatExecutable() const;
167
168  const std::vector<std::string>& GetCompilerOptions() const {
169    return compiler_options_;
170  }
171
172  void AddCompilerOption(const std::string& option) {
173    compiler_options_.push_back(option);
174  }
175
176  const std::vector<std::string>& GetImageCompilerOptions() const {
177    return image_compiler_options_;
178  }
179
180  const std::string& GetImageLocation() const {
181    return image_location_;
182  }
183
184  // Starts a runtime, which may cause threads to be started and code to run.
185  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
186
187  bool IsShuttingDown(Thread* self);
188  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
189    return shutting_down_;
190  }
191
192  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
193    return threads_being_born_;
194  }
195
196  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
197    threads_being_born_++;
198  }
199
200  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);
201
202  bool IsStarted() const {
203    return started_;
204  }
205
206  bool IsFinishedStarting() const {
207    return finished_starting_;
208  }
209
210  static Runtime* Current() {
211    return instance_;
212  }
213
214  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
215  // callers should prefer.
216  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);
217
218  // Returns the "main" ThreadGroup, used when attaching user threads.
219  jobject GetMainThreadGroup() const;
220
221  // Returns the "system" ThreadGroup, used when attaching our internal threads.
222  jobject GetSystemThreadGroup() const;
223
224  // Returns the system ClassLoader which represents the CLASSPATH.
225  jobject GetSystemClassLoader() const;
226
227  // Attaches the calling native thread to the runtime.
228  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
229                           bool create_peer);
230
231  void CallExitHook(jint status);
232
233  // Detaches the current native thread from the runtime.
234  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);
235
236  void DumpDeoptimizations(std::ostream& os);
237  void DumpForSigQuit(std::ostream& os);
238  void DumpLockHolders(std::ostream& os);
239
240  ~Runtime();
241
242  const std::string& GetBootClassPathString() const {
243    return boot_class_path_string_;
244  }
245
246  const std::string& GetClassPathString() const {
247    return class_path_string_;
248  }
249
250  ClassLinker* GetClassLinker() const {
251    return class_linker_;
252  }
253
254  size_t GetDefaultStackSize() const {
255    return default_stack_size_;
256  }
257
258  gc::Heap* GetHeap() const {
259    return heap_;
260  }
261
262  InternTable* GetInternTable() const {
263    DCHECK(intern_table_ != nullptr);
264    return intern_table_;
265  }
266
267  JavaVMExt* GetJavaVM() const {
268    return java_vm_.get();
269  }
270
271  size_t GetMaxSpinsBeforeThinLockInflation() const {
272    return max_spins_before_thin_lock_inflation_;
273  }
274
275  MonitorList* GetMonitorList() const {
276    return monitor_list_;
277  }
278
279  MonitorPool* GetMonitorPool() const {
280    return monitor_pool_;
281  }
282
283  // Is the given object the special object used to mark a cleared JNI weak global?
284  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
285
286  // Get the special object used to mark a cleared JNI weak global.
287  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);
288
289  mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);
290
291  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
292      REQUIRES_SHARED(Locks::mutator_lock_);
293
294  const std::vector<std::string>& GetProperties() const {
295    return properties_;
296  }
297
298  ThreadList* GetThreadList() const {
299    return thread_list_;
300  }
301
302  static const char* GetVersion() {
303    return "2.1.0";
304  }
305
306  bool IsMethodHandlesEnabled() const {
307    return true;
308  }
309
310  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
311  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
312  // broadcast_for_checkpoint is true when we broadcast for making blocking threads to respond to
313  // checkpoint requests. It's false when we broadcast to unblock blocking threads after system weak
314  // access is reenabled.
315  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);
316
317  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
318  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
319  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
320      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
321      REQUIRES_SHARED(Locks::mutator_lock_);
322
323  // Visit image roots, only used for hprof since the GC uses the image space mod union table
324  // instead.
325  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
326
327  // Visit all of the roots we can do safely do concurrently.
328  void VisitConcurrentRoots(RootVisitor* visitor,
329                            VisitRootFlags flags = kVisitRootFlagAllRoots)
330      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
331      REQUIRES_SHARED(Locks::mutator_lock_);
332
333  // Visit all of the non thread roots, we can do this with mutators unpaused.
334  void VisitNonThreadRoots(RootVisitor* visitor)
335      REQUIRES_SHARED(Locks::mutator_lock_);
336
337  void VisitTransactionRoots(RootVisitor* visitor)
338      REQUIRES_SHARED(Locks::mutator_lock_);
339
340  // Sweep system weaks, the system weak is deleted if the visitor return null. Otherwise, the
341  // system weak is updated to be the visitor's returned value.
342  void SweepSystemWeaks(IsMarkedVisitor* visitor)
343      REQUIRES_SHARED(Locks::mutator_lock_);
344
345  // Returns a special method that calls into a trampoline for runtime method resolution
346  ArtMethod* GetResolutionMethod();
347
348  bool HasResolutionMethod() const {
349    return resolution_method_ != nullptr;
350  }
351
352  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
353  void ClearResolutionMethod() {
354    resolution_method_ = nullptr;
355  }
356
357  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
358
359  // Returns a special method that calls into a trampoline for runtime imt conflicts.
360  ArtMethod* GetImtConflictMethod();
361  ArtMethod* GetImtUnimplementedMethod();
362
363  bool HasImtConflictMethod() const {
364    return imt_conflict_method_ != nullptr;
365  }
366
367  void ClearImtConflictMethod() {
368    imt_conflict_method_ = nullptr;
369  }
370
371  void FixupConflictTables();
372  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
373  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
374
375  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
376      REQUIRES_SHARED(Locks::mutator_lock_);
377
378  void ClearImtUnimplementedMethod() {
379    imt_unimplemented_method_ = nullptr;
380  }
381
382  bool HasCalleeSaveMethod(CalleeSaveType type) const {
383    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
384  }
385
386  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
387      REQUIRES_SHARED(Locks::mutator_lock_);
388
389  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
390      REQUIRES_SHARED(Locks::mutator_lock_);
391
392  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
393    return callee_save_method_frame_infos_[static_cast<size_t>(type)];
394  }
395
396  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
397      REQUIRES_SHARED(Locks::mutator_lock_);
398
399  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
400    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
401  }
402
403  InstructionSet GetInstructionSet() const {
404    return instruction_set_;
405  }
406
407  void SetInstructionSet(InstructionSet instruction_set);
408  void ClearInstructionSet();
409
410  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
411  void ClearCalleeSaveMethods();
412
413  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
414
415  int32_t GetStat(int kind);
416
417  RuntimeStats* GetStats() {
418    return &stats_;
419  }
420
421  bool HasStatsEnabled() const {
422    return stats_enabled_;
423  }
424
425  void ResetStats(int kinds);
426
427  void SetStatsEnabled(bool new_state)
428      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
429
430  enum class NativeBridgeAction {  // private
431    kUnload,
432    kInitialize
433  };
434
435  jit::Jit* GetJit() const {
436    return jit_.get();
437  }
438
439  // Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
440  bool UseJitCompilation() const;
441
442  void PreZygoteFork();
443  void InitNonZygoteOrPostFork(
444      JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);
445
446  const instrumentation::Instrumentation* GetInstrumentation() const {
447    return &instrumentation_;
448  }
449
450  instrumentation::Instrumentation* GetInstrumentation() {
451    return &instrumentation_;
452  }
453
454  void RegisterAppInfo(const std::vector<std::string>& code_paths,
455                       const std::string& profile_output_filename);
456
457  // Transaction support.
458  bool IsActiveTransaction() const {
459    return preinitialization_transaction_ != nullptr;
460  }
461  void EnterTransactionMode();
462  void EnterTransactionMode(mirror::Class* root);
463  void ExitTransactionMode();
464  // Transaction rollback and exit transaction are always done together, it's convenience to
465  // do them in one function.
466  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
467  bool IsTransactionAborted() const;
468
469  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
470      REQUIRES_SHARED(Locks::mutator_lock_);
471  void ThrowTransactionAbortError(Thread* self)
472      REQUIRES_SHARED(Locks::mutator_lock_);
473
474  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
475                               bool is_volatile) const;
476  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
477                            bool is_volatile) const;
478  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
479                            bool is_volatile) const;
480  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
481                          bool is_volatile) const;
482  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
483                          bool is_volatile) const;
484  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
485                          bool is_volatile) const;
486  void RecordWriteFieldReference(mirror::Object* obj,
487                                 MemberOffset field_offset,
488                                 ObjPtr<mirror::Object> value,
489                                 bool is_volatile) const
490      REQUIRES_SHARED(Locks::mutator_lock_);
491  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
492      REQUIRES_SHARED(Locks::mutator_lock_);
493  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
494      REQUIRES(Locks::intern_table_lock_);
495  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
496      REQUIRES(Locks::intern_table_lock_);
497  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
498      REQUIRES(Locks::intern_table_lock_);
499  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
500      REQUIRES(Locks::intern_table_lock_);
501  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
502      REQUIRES_SHARED(Locks::mutator_lock_);
503
504  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
505  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
506  // with the unexpected_signal_lock_.
507  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
508    return fault_message_;
509  }
510
511  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
512
513  bool ExplicitStackOverflowChecks() const {
514    return !implicit_so_checks_;
515  }
516
517  bool IsVerificationEnabled() const;
518  bool IsVerificationSoftFail() const;
519
520  bool IsDexFileFallbackEnabled() const {
521    return allow_dex_file_fallback_;
522  }
523
524  const std::vector<std::string>& GetCpuAbilist() const {
525    return cpu_abilist_;
526  }
527
528  bool IsRunningOnMemoryTool() const {
529    return is_running_on_memory_tool_;
530  }
531
532  void SetTargetSdkVersion(int32_t version) {
533    target_sdk_version_ = version;
534  }
535
536  int32_t GetTargetSdkVersion() const {
537    return target_sdk_version_;
538  }
539
540  uint32_t GetZygoteMaxFailedBoots() const {
541    return zygote_max_failed_boots_;
542  }
543
544  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
545    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
546  }
547
548  // Create the JIT and instrumentation and code cache.
549  void CreateJit();
550
551  ArenaPool* GetArenaPool() {
552    return arena_pool_.get();
553  }
554  ArenaPool* GetJitArenaPool() {
555    return jit_arena_pool_.get();
556  }
557  const ArenaPool* GetArenaPool() const {
558    return arena_pool_.get();
559  }
560
561  void ReclaimArenaPoolMemory();
562
563  LinearAlloc* GetLinearAlloc() {
564    return linear_alloc_.get();
565  }
566
567  jit::JitOptions* GetJITOptions() {
568    return jit_options_.get();
569  }
570
571  bool IsJavaDebuggable() const {
572    return is_java_debuggable_;
573  }
574
575  void SetJavaDebuggable(bool value);
576
577  // Deoptimize the boot image, called for Java debuggable apps.
578  void DeoptimizeBootImage();
579
580  bool IsNativeDebuggable() const {
581    return is_native_debuggable_;
582  }
583
584  void SetNativeDebuggable(bool value) {
585    is_native_debuggable_ = value;
586  }
587
588  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
589  std::string GetFingerprint() {
590    return fingerprint_;
591  }
592
593  // Called from class linker.
594  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
595
596  // Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
597  LinearAlloc* CreateLinearAlloc();
598
599  OatFileManager& GetOatFileManager() const {
600    DCHECK(oat_file_manager_ != nullptr);
601    return *oat_file_manager_;
602  }
603
604  double GetHashTableMinLoadFactor() const;
605  double GetHashTableMaxLoadFactor() const;
606
607  void SetSafeMode(bool mode) {
608    safe_mode_ = mode;
609  }
610
611  bool GetDumpNativeStackOnSigQuit() const {
612    return dump_native_stack_on_sig_quit_;
613  }
614
615  bool GetPrunedDalvikCache() const {
616    return pruned_dalvik_cache_;
617  }
618
619  void SetPrunedDalvikCache(bool pruned) {
620    pruned_dalvik_cache_ = pruned;
621  }
622
623  void UpdateProcessState(ProcessState process_state);
624
625  // Returns true if we currently care about long mutator pause.
626  bool InJankPerceptibleProcessState() const {
627    return process_state_ == kProcessStateJankPerceptible;
628  }
629
630  void RegisterSensitiveThread() const;
631
632  void SetZygoteNoThreadSection(bool val) {
633    zygote_no_threads_ = val;
634  }
635
636  bool IsZygoteNoThreadSection() const {
637    return zygote_no_threads_;
638  }
639
640  // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
641  // optimization that makes it impossible to deoptimize.
642  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
643
644  // Returns a saved copy of the environment (getenv/setenv values).
645  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
646  char** GetEnvSnapshot() const {
647    return env_snapshot_.GetSnapshot();
648  }
649
650  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
651  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
652
653  ClassHierarchyAnalysis* GetClassHierarchyAnalysis() {
654    return cha_;
655  }
656
657  void AttachAgent(const std::string& agent_arg);
658
659  const std::list<ti::Agent>& GetAgents() const {
660    return agents_;
661  }
662
663  RuntimeCallbacks* GetRuntimeCallbacks();
664
665  void InitThreadGroups(Thread* self);
666
667  void SetDumpGCPerformanceOnShutdown(bool value) {
668    dump_gc_performance_on_shutdown_ = value;
669  }
670
671  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
672    DCHECK_LE(kind, DeoptimizationKind::kLast);
673    deoptimization_counts_[static_cast<size_t>(kind)]++;
674  }
675
676  uint32_t GetNumberOfDeoptimizations() const {
677    uint32_t result = 0;
678    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
679      result += deoptimization_counts_[i];
680    }
681    return result;
682  }
683
684 private:
685  static void InitPlatformSignalHandlers();
686
687  Runtime();
688
689  void BlockSignals();
690
691  bool Init(RuntimeArgumentMap&& runtime_options)
692      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
693  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
694  void RegisterRuntimeNativeMethods(JNIEnv* env);
695
696  void StartDaemonThreads();
697  void StartSignalCatcher();
698
699  void MaybeSaveJitProfilingInfo();
700
701  // Visit all of the thread roots.
702  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
703      REQUIRES_SHARED(Locks::mutator_lock_);
704
705  // Visit all other roots which must be done with mutators suspended.
706  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
707      REQUIRES_SHARED(Locks::mutator_lock_);
708
709  // Constant roots are the roots which never change after the runtime is initialized, they only
710  // need to be visited once per GC cycle.
711  void VisitConstantRoots(RootVisitor* visitor)
712      REQUIRES_SHARED(Locks::mutator_lock_);
713
714  // A pointer to the active runtime or null.
715  static Runtime* instance_;
716
717  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
718  static constexpr int kProfileForground = 0;
719  static constexpr int kProfileBackground = 1;
720
721  static constexpr uint32_t kCalleeSaveSize = 4u;
722
723  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
724  uint64_t callee_save_methods_[kCalleeSaveSize];
725  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
726  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
727  ArtMethod* resolution_method_;
728  ArtMethod* imt_conflict_method_;
729  // Unresolved method has the same behavior as the conflict method, it is used by the class linker
730  // for differentiating between unfilled imt slots vs conflict slots in superclasses.
731  ArtMethod* imt_unimplemented_method_;
732
733  // Special sentinel object used to invalid conditions in JNI (cleared weak references) and
734  // JDWP (invalid references).
735  GcRoot<mirror::Object> sentinel_;
736
737  InstructionSet instruction_set_;
738  QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];
739
740  CompilerCallbacks* compiler_callbacks_;
741  bool is_zygote_;
742  bool must_relocate_;
743  bool is_concurrent_gc_enabled_;
744  bool is_explicit_gc_disabled_;
745  bool dex2oat_enabled_;
746  bool image_dex2oat_enabled_;
747
748  std::string compiler_executable_;
749  std::string patchoat_executable_;
750  std::vector<std::string> compiler_options_;
751  std::vector<std::string> image_compiler_options_;
752  std::string image_location_;
753
754  std::string boot_class_path_string_;
755  std::string class_path_string_;
756  std::vector<std::string> properties_;
757
758  std::list<ti::Agent> agents_;
759  std::vector<Plugin> plugins_;
760
761  // The default stack size for managed threads created by the runtime.
762  size_t default_stack_size_;
763
764  gc::Heap* heap_;
765
766  std::unique_ptr<ArenaPool> jit_arena_pool_;
767  std::unique_ptr<ArenaPool> arena_pool_;
768  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
769  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
770  // since the field arrays are int arrays in this case.
771  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;
772
773  // Shared linear alloc for now.
774  std::unique_ptr<LinearAlloc> linear_alloc_;
775
776  // The number of spins that are done before thread suspension is used to forcibly inflate.
777  size_t max_spins_before_thin_lock_inflation_;
778  MonitorList* monitor_list_;
779  MonitorPool* monitor_pool_;
780
781  ThreadList* thread_list_;
782
783  InternTable* intern_table_;
784
785  ClassLinker* class_linker_;
786
787  SignalCatcher* signal_catcher_;
788
789  // If true, the runtime will connect to tombstoned via a socket to
790  // request an open file descriptor to write its traces to.
791  bool use_tombstoned_traces_;
792
793  // Location to which traces must be written on SIGQUIT. Only used if
794  // tombstoned_traces_ == false.
795  std::string stack_trace_file_;
796
797  std::unique_ptr<JavaVMExt> java_vm_;
798
799  std::unique_ptr<jit::Jit> jit_;
800  std::unique_ptr<jit::JitOptions> jit_options_;
801
802  // Fault message, printed when we get a SIGSEGV.
803  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
804  std::string fault_message_ GUARDED_BY(fault_message_lock_);
805
806  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
807  // the shutdown lock so that threads aren't born while we're shutting down.
808  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);
809
810  // Waited upon until no threads are being born.
811  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);
812
813  // Set when runtime shutdown is past the point that new threads may attach.
814  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);
815
816  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
817  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);
818
819  bool started_;
820
821  // New flag added which tells us if the runtime has finished starting. If
822  // this flag is set then the Daemon threads are created and the class loader
823  // is created. This flag is needed for knowing if its safe to request CMS.
824  bool finished_starting_;
825
826  // Hooks supported by JNI_CreateJavaVM
827  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
828  void (*exit_)(jint status);
829  void (*abort_)();
830
831  bool stats_enabled_;
832  RuntimeStats stats_;
833
834  const bool is_running_on_memory_tool_;
835
836  std::unique_ptr<TraceConfig> trace_config_;
837
838  instrumentation::Instrumentation instrumentation_;
839
840  jobject main_thread_group_;
841  jobject system_thread_group_;
842
843  // As returned by ClassLoader.getSystemClassLoader().
844  jobject system_class_loader_;
845
846  // If true, then we dump the GC cumulative timings on shutdown.
847  bool dump_gc_performance_on_shutdown_;
848
849  // Transaction used for pre-initializing classes at compilation time.
850  std::unique_ptr<Transaction> preinitialization_transaction_;
851
852  // If kNone, verification is disabled. kEnable by default.
853  verifier::VerifyMode verify_;
854
855  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
856  // available/usable.
857  bool allow_dex_file_fallback_;
858
859  // List of supported cpu abis.
860  std::vector<std::string> cpu_abilist_;
861
862  // Specifies target SDK version to allow workarounds for certain API levels.
863  int32_t target_sdk_version_;
864
865  // Implicit checks flags.
866  bool implicit_null_checks_;       // NullPointer checks are implicit.
867  bool implicit_so_checks_;         // StackOverflow checks are implicit.
868  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.
869
870  // Whether or not the sig chain (and implicitly the fault handler) should be
871  // disabled. Tools like dex2oat or patchoat don't need them. This enables
872  // building a statically link version of dex2oat.
873  bool no_sig_chain_;
874
875  // Force the use of native bridge even if the app ISA matches the runtime ISA.
876  bool force_native_bridge_;
877
878  // Whether or not a native bridge has been loaded.
879  //
880  // The native bridge allows running native code compiled for a foreign ISA. The way it works is,
881  // if standard dlopen fails to load native library associated with native activity, it calls to
882  // the native bridge to load it and then gets the trampoline for the entry to native activity.
883  //
884  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
885  // When non-empty the native bridge will be loaded from the given file. An empty value means
886  // that there's no native bridge.
887  bool is_native_bridge_loaded_;
888
889  // Whether we are running under native debugger.
890  bool is_native_debuggable_;
891
892  // Whether Java code needs to be debuggable.
893  bool is_java_debuggable_;
894
895  // The maximum number of failed boots we allow before pruning the dalvik cache
896  // and trying again. This option is only inspected when we're running as a
897  // zygote.
898  uint32_t zygote_max_failed_boots_;
899
900  // Enable experimental opcodes that aren't fully specified yet. The intent is to
901  // eventually publish them as public-usable opcodes, but they aren't ready yet.
902  //
903  // Experimental opcodes should not be used by other production code.
904  ExperimentalFlags experimental_flags_;
905
906  // Contains the build fingerprint, if given as a parameter.
907  std::string fingerprint_;
908
909  // Oat file manager, keeps track of what oat files are open.
910  OatFileManager* oat_file_manager_;
911
912  // Whether or not we are on a low RAM device.
913  bool is_low_memory_mode_;
914
915  // Whether the application should run in safe mode, that is, interpreter only.
916  bool safe_mode_;
917
918  // Whether threads should dump their native stack on SIGQUIT.
919  bool dump_native_stack_on_sig_quit_;
920
921  // Whether the dalvik cache was pruned when initializing the runtime.
922  bool pruned_dalvik_cache_;
923
924  // Whether or not we currently care about pause times.
925  ProcessState process_state_;
926
927  // Whether zygote code is in a section that should not start threads.
928  bool zygote_no_threads_;
929
930  // Saved environment.
931  class EnvSnapshot {
932   public:
933    EnvSnapshot() = default;
934    void TakeSnapshot();
935    char** GetSnapshot() const;
936
937   private:
938    std::unique_ptr<char*[]> c_env_vector_;
939    std::vector<std::unique_ptr<std::string>> name_value_pairs_;
940
941    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
942  } env_snapshot_;
943
944  // Generic system-weak holders.
945  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;
946
947  ClassHierarchyAnalysis* cha_;
948
949  std::unique_ptr<RuntimeCallbacks> callbacks_;
950
951  std::atomic<uint32_t> deoptimization_counts_[
952      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
953
954  std::unique_ptr<MemMap> protected_fault_page_;
955
956  DISALLOW_COPY_AND_ASSIGN(Runtime);
957};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_