runtime.h revision 92265222f1e1df56ee6d106493b1bd2be65d5ce9
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_RUNTIME_H_
18#define ART_RUNTIME_RUNTIME_H_
19
20#include <jni.h>
21#include <stdio.h>
22
23#include <iosfwd>
24#include <set>
25#include <string>
26#include <utility>
27#include <memory>
28#include <vector>
29
30#include "arch/instruction_set.h"
31#include "base/macros.h"
32#include "base/mutex.h"
33#include "deoptimization_kind.h"
34#include "dex/dex_file_types.h"
35#include "experimental_flags.h"
36#include "gc_root.h"
37#include "instrumentation.h"
38#include "jdwp_provider.h"
39#include "obj_ptr.h"
40#include "offsets.h"
41#include "process_state.h"
42#include "quick/quick_method_frame_info.h"
43#include "runtime_stats.h"
44
45namespace art {
46
47namespace gc {
48class AbstractSystemWeakHolder;
49class Heap;
50}  // namespace gc
51
52namespace jit {
53class Jit;
54class JitOptions;
55}  // namespace jit
56
namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
// Alias declaration instead of the legacy `typedef`; identical meaning,
// consistent with the C++11 features (enum class, etc.) already used here.
using ByteArray = PrimitiveArray<int8_t>;
class String;
class Throwable;
}  // namespace mirror
67namespace ti {
68class Agent;
69class AgentSpec;
70}  // namespace ti
71namespace verifier {
72class MethodVerifier;
73enum class VerifyMode : int8_t;
74}  // namespace verifier
75class ArenaPool;
76class ArtMethod;
77enum class CalleeSaveType: uint32_t;
78class ClassLinker;
79class CompilerCallbacks;
80class DexFile;
81class InternTable;
82class IsMarkedVisitor;
83class JavaVMExt;
84class LinearAlloc;
85class MemMap;
86class MonitorList;
87class MonitorPool;
88class NullPointerHandler;
89class OatFileManager;
90class Plugin;
91struct RuntimeArgumentMap;
92class RuntimeCallbacks;
93class SignalCatcher;
94class StackOverflowHandler;
95class SuspensionHandler;
96class ThreadList;
97class Trace;
98struct TraceConfig;
99class Transaction;
100
// Raw (name, value) option pairs as accepted by Runtime::ParseOptions.
// Alias declaration instead of the legacy `typedef` (same type, modern idiom).
using RuntimeOptions = std::vector<std::pair<std::string, const void*>>;
102
103class Runtime {
104 public:
105  // Parse raw runtime options.
106  static bool ParseOptions(const RuntimeOptions& raw_options,
107                           bool ignore_unrecognized,
108                           RuntimeArgumentMap* runtime_options);
109
110  // Creates and initializes a new runtime.
111  static bool Create(RuntimeArgumentMap&& runtime_options)
112      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
113
114  // Creates and initializes a new runtime.
115  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
116      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
117
  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    // AOT = we have compiler callbacks installed but no JIT driving compilation.
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
  bool IsCompiler() const {
    // Compiler callbacks are only installed when a compiler drives this
    // runtime, so their presence is the "is a compiler" signal (see
    // SetCompilerCallbacks, which CHECKs against null).
    return compiler_callbacks_ != nullptr;
  }
127
  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  // True only when relocation was requested (must_relocate_) AND is actually
  // possible in the current configuration.
  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  // Whether relocation was requested, regardless of whether it is possible.
  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  // dex2oat invocation is gated on image dex2oat also being enabled.
  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }
148
  // Returns the installed compiler callbacks, or null when no compiler is
  // driving this runtime (see IsCompiler()).
  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  // Installs the compiler callbacks. Null is rejected because IsCompiler()
  // uses the callbacks' presence to decide whether a compiler is active.
  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  // Whether this runtime instance is the zygote.
  bool IsZygote() const {
    return is_zygote_;
  }

  // Whether explicitly-requested GCs are disabled.
  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }
165
166  std::string GetCompilerExecutable() const;
167  std::string GetPatchoatExecutable() const;
168
169  const std::vector<std::string>& GetCompilerOptions() const {
170    return compiler_options_;
171  }
172
173  void AddCompilerOption(const std::string& option) {
174    compiler_options_.push_back(option);
175  }
176
177  const std::vector<std::string>& GetImageCompilerOptions() const {
178    return image_compiler_options_;
179  }
180
181  const std::string& GetImageLocation() const {
182    return image_location_;
183  }
184
185  // Starts a runtime, which may cause threads to be started and code to run.
186  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
187
  // Returns whether the runtime is shutting down; acquires the shutdown lock.
  bool IsShuttingDown(Thread* self);
  // Lock-free variant for callers that already hold runtime_shutdown_lock_.
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  // Number of threads created but not yet fully initialized. Shutdown waits
  // for this to drain so threads aren't born mid-teardown.
  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  // Marks the start of a thread's birth; must be balanced by EndThreadBirth().
  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  // Whether Start() has been called.
  bool IsStarted() const {
    return started_;
  }

  // Whether startup fully completed (daemon threads and system class loader
  // created) — see the finished_starting_ field comment.
  bool IsFinishedStarting() const {
    return finished_starting_;
  }
210
211  static Runtime* Current() {
212    return instance_;
213  }
214
215  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
216  // callers should prefer.
217  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);
218
219  // Returns the "main" ThreadGroup, used when attaching user threads.
220  jobject GetMainThreadGroup() const;
221
222  // Returns the "system" ThreadGroup, used when attaching our internal threads.
223  jobject GetSystemThreadGroup() const;
224
225  // Returns the system ClassLoader which represents the CLASSPATH.
226  jobject GetSystemClassLoader() const;
227
228  // Attaches the calling native thread to the runtime.
229  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
230                           bool create_peer);
231
232  void CallExitHook(jint status);
233
234  // Detaches the current native thread from the runtime.
235  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);
236
237  void DumpDeoptimizations(std::ostream& os);
238  void DumpForSigQuit(std::ostream& os);
239  void DumpLockHolders(std::ostream& os);
240
241  ~Runtime();
242
243  const std::string& GetBootClassPathString() const {
244    return boot_class_path_string_;
245  }
246
247  const std::string& GetClassPathString() const {
248    return class_path_string_;
249  }
250
251  ClassLinker* GetClassLinker() const {
252    return class_linker_;
253  }
254
255  size_t GetDefaultStackSize() const {
256    return default_stack_size_;
257  }
258
259  gc::Heap* GetHeap() const {
260    return heap_;
261  }
262
263  InternTable* GetInternTable() const {
264    DCHECK(intern_table_ != nullptr);
265    return intern_table_;
266  }
267
268  JavaVMExt* GetJavaVM() const {
269    return java_vm_.get();
270  }
271
272  size_t GetMaxSpinsBeforeThinLockInflation() const {
273    return max_spins_before_thin_lock_inflation_;
274  }
275
276  MonitorList* GetMonitorList() const {
277    return monitor_list_;
278  }
279
280  MonitorPool* GetMonitorPool() const {
281    return monitor_pool_;
282  }
283
284  // Is the given object the special object used to mark a cleared JNI weak global?
285  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
286
287  // Get the special object used to mark a cleared JNI weak global.
288  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);
289
290  mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);
291
292  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
293      REQUIRES_SHARED(Locks::mutator_lock_);
294
295  const std::vector<std::string>& GetProperties() const {
296    return properties_;
297  }
298
299  ThreadList* GetThreadList() const {
300    return thread_list_;
301  }
302
303  static const char* GetVersion() {
304    return "2.1.0";
305  }
306
  // Method handles are unconditionally enabled; kept as a query so call sites
  // read as a capability check rather than a hard-coded `true`.
  bool IsMethodHandlesEnabled() const {
    return true;
  }
310
311  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
312  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
313  // broadcast_for_checkpoint is true when we broadcast for making blocking threads to respond to
314  // checkpoint requests. It's false when we broadcast to unblock blocking threads after system weak
315  // access is reenabled.
316  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);
317
318  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
319  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
320  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
321      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
322      REQUIRES_SHARED(Locks::mutator_lock_);
323
324  // Visit image roots, only used for hprof since the GC uses the image space mod union table
325  // instead.
326  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
327
328  // Visit all of the roots we can do safely do concurrently.
329  void VisitConcurrentRoots(RootVisitor* visitor,
330                            VisitRootFlags flags = kVisitRootFlagAllRoots)
331      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
332      REQUIRES_SHARED(Locks::mutator_lock_);
333
334  // Visit all of the non thread roots, we can do this with mutators unpaused.
335  void VisitNonThreadRoots(RootVisitor* visitor)
336      REQUIRES_SHARED(Locks::mutator_lock_);
337
338  void VisitTransactionRoots(RootVisitor* visitor)
339      REQUIRES_SHARED(Locks::mutator_lock_);
340
341  // Sweep system weaks, the system weak is deleted if the visitor return null. Otherwise, the
342  // system weak is updated to be the visitor's returned value.
343  void SweepSystemWeaks(IsMarkedVisitor* visitor)
344      REQUIRES_SHARED(Locks::mutator_lock_);
345
346  // Returns a special method that calls into a trampoline for runtime method resolution
347  ArtMethod* GetResolutionMethod();
348
349  bool HasResolutionMethod() const {
350    return resolution_method_ != nullptr;
351  }
352
353  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
354  void ClearResolutionMethod() {
355    resolution_method_ = nullptr;
356  }
357
358  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
359
360  // Returns a special method that calls into a trampoline for runtime imt conflicts.
361  ArtMethod* GetImtConflictMethod();
362  ArtMethod* GetImtUnimplementedMethod();
363
364  bool HasImtConflictMethod() const {
365    return imt_conflict_method_ != nullptr;
366  }
367
368  void ClearImtConflictMethod() {
369    imt_conflict_method_ = nullptr;
370  }
371
372  void FixupConflictTables();
373  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
374  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
375
376  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
377      REQUIRES_SHARED(Locks::mutator_lock_);
378
379  void ClearImtUnimplementedMethod() {
380    imt_unimplemented_method_ = nullptr;
381  }
382
  // Whether a callee-save method has been installed for `type`. Entries are
  // stored as uint64_t (see callee_save_methods_) so zero means "unset".
  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Frame layout info for the callee-save method of the given type.
  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[static_cast<size_t>(type)];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Byte offset of the callee-save slot inside Runtime; used by generated
  // code / assembly stubs to load the method without a C++ call.
  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }
403
404  InstructionSet GetInstructionSet() const {
405    return instruction_set_;
406  }
407
408  void SetInstructionSet(InstructionSet instruction_set);
409  void ClearInstructionSet();
410
411  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
412  void ClearCalleeSaveMethods();
413
414  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
415
416  int32_t GetStat(int kind);
417
418  RuntimeStats* GetStats() {
419    return &stats_;
420  }
421
422  bool HasStatsEnabled() const {
423    return stats_enabled_;
424  }
425
426  void ResetStats(int kinds);
427
428  void SetStatsEnabled(bool new_state)
429      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
430
431  enum class NativeBridgeAction {  // private
432    kUnload,
433    kInitialize
434  };
435
436  jit::Jit* GetJit() const {
437    return jit_.get();
438  }
439
440  // Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
441  bool UseJitCompilation() const;
442
443  void PreZygoteFork();
444  void InitNonZygoteOrPostFork(
445      JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);
446
447  const instrumentation::Instrumentation* GetInstrumentation() const {
448    return &instrumentation_;
449  }
450
451  instrumentation::Instrumentation* GetInstrumentation() {
452    return &instrumentation_;
453  }
454
455  void RegisterAppInfo(const std::vector<std::string>& code_paths,
456                       const std::string& profile_output_filename);
457
458  // Transaction support.
459  bool IsActiveTransaction() const;
460  void EnterTransactionMode();
461  void EnterTransactionMode(bool strict, mirror::Class* root);
462  void ExitTransactionMode();
463  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
464  // Transaction rollback and exit transaction are always done together, it's convenience to
465  // do them in one function.
466  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
467  bool IsTransactionAborted() const;
468  const std::unique_ptr<Transaction>& GetTransaction() const;
469  bool IsActiveStrictTransactionMode() const;
470
471  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
472      REQUIRES_SHARED(Locks::mutator_lock_);
473  void ThrowTransactionAbortError(Thread* self)
474      REQUIRES_SHARED(Locks::mutator_lock_);
475
476  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
477                               bool is_volatile) const;
478  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
479                            bool is_volatile) const;
480  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
481                            bool is_volatile) const;
482  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
483                          bool is_volatile) const;
484  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
485                          bool is_volatile) const;
486  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
487                          bool is_volatile) const;
488  void RecordWriteFieldReference(mirror::Object* obj,
489                                 MemberOffset field_offset,
490                                 ObjPtr<mirror::Object> value,
491                                 bool is_volatile) const
492      REQUIRES_SHARED(Locks::mutator_lock_);
493  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
494      REQUIRES_SHARED(Locks::mutator_lock_);
495  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
496      REQUIRES(Locks::intern_table_lock_);
497  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
498      REQUIRES(Locks::intern_table_lock_);
499  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
500      REQUIRES(Locks::intern_table_lock_);
501  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
502      REQUIRES(Locks::intern_table_lock_);
503  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
504      REQUIRES_SHARED(Locks::mutator_lock_);
505
506  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
507  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
508  // with the unexpected_signal_lock_.
509  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
510    return fault_message_;
511  }
512
513  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
514
515  bool ExplicitStackOverflowChecks() const {
516    return !implicit_so_checks_;
517  }
518
519  void DisableVerifier();
520  bool IsVerificationEnabled() const;
521  bool IsVerificationSoftFail() const;
522
523  void SetHiddenApiChecksEnabled(bool value) {
524    do_hidden_api_checks_ = value;
525  }
526
527  bool AreHiddenApiChecksEnabled() const {
528    return do_hidden_api_checks_;
529  }
530
531  void SetPendingHiddenApiWarning(bool value) {
532    pending_hidden_api_warning_ = value;
533  }
534
535  bool HasPendingHiddenApiWarning() const {
536    return pending_hidden_api_warning_;
537  }
538
539  void SetDedupeHiddenApiWarnings(bool value) {
540    dedupe_hidden_api_warnings_ = value;
541  }
542
543  bool ShouldDedupeHiddenApiWarnings() {
544    return dedupe_hidden_api_warnings_;
545  }
546
547  void AlwaysSetHiddenApiWarningFlag() {
548    always_set_hidden_api_warning_flag_ = true;
549  }
550
551  bool ShouldAlwaysSetHiddenApiWarningFlag() const {
552    return always_set_hidden_api_warning_flag_;
553  }
554
555  bool IsDexFileFallbackEnabled() const {
556    return allow_dex_file_fallback_;
557  }
558
559  const std::vector<std::string>& GetCpuAbilist() const {
560    return cpu_abilist_;
561  }
562
563  bool IsRunningOnMemoryTool() const {
564    return is_running_on_memory_tool_;
565  }
566
567  void SetTargetSdkVersion(int32_t version) {
568    target_sdk_version_ = version;
569  }
570
571  int32_t GetTargetSdkVersion() const {
572    return target_sdk_version_;
573  }
574
575  uint32_t GetZygoteMaxFailedBoots() const {
576    return zygote_max_failed_boots_;
577  }
578
579  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
580    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
581  }
582
583  // Create the JIT and instrumentation and code cache.
584  void CreateJit();
585
586  ArenaPool* GetArenaPool() {
587    return arena_pool_.get();
588  }
589  ArenaPool* GetJitArenaPool() {
590    return jit_arena_pool_.get();
591  }
592  const ArenaPool* GetArenaPool() const {
593    return arena_pool_.get();
594  }
595
596  void ReclaimArenaPoolMemory();
597
598  LinearAlloc* GetLinearAlloc() {
599    return linear_alloc_.get();
600  }
601
602  jit::JitOptions* GetJITOptions() {
603    return jit_options_.get();
604  }
605
606  bool IsJavaDebuggable() const {
607    return is_java_debuggable_;
608  }
609
610  void SetJavaDebuggable(bool value);
611
612  // Deoptimize the boot image, called for Java debuggable apps.
613  void DeoptimizeBootImage();
614
615  bool IsNativeDebuggable() const {
616    return is_native_debuggable_;
617  }
618
619  void SetNativeDebuggable(bool value) {
620    is_native_debuggable_ = value;
621  }
622
623  bool AreAsyncExceptionsThrown() const {
624    return async_exceptions_thrown_;
625  }
626
627  void SetAsyncExceptionsThrown() {
628    async_exceptions_thrown_ = true;
629  }
630
631  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
632  std::string GetFingerprint() {
633    return fingerprint_;
634  }
635
636  // Called from class linker.
637  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
638
639  // Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
640  LinearAlloc* CreateLinearAlloc();
641
642  OatFileManager& GetOatFileManager() const {
643    DCHECK(oat_file_manager_ != nullptr);
644    return *oat_file_manager_;
645  }
646
647  double GetHashTableMinLoadFactor() const;
648  double GetHashTableMaxLoadFactor() const;
649
650  void SetSafeMode(bool mode) {
651    safe_mode_ = mode;
652  }
653
654  bool GetDumpNativeStackOnSigQuit() const {
655    return dump_native_stack_on_sig_quit_;
656  }
657
658  bool GetPrunedDalvikCache() const {
659    return pruned_dalvik_cache_;
660  }
661
662  void SetPrunedDalvikCache(bool pruned) {
663    pruned_dalvik_cache_ = pruned;
664  }
665
666  void UpdateProcessState(ProcessState process_state);
667
668  // Returns true if we currently care about long mutator pause.
669  bool InJankPerceptibleProcessState() const {
670    return process_state_ == kProcessStateJankPerceptible;
671  }
672
673  void RegisterSensitiveThread() const;
674
675  void SetZygoteNoThreadSection(bool val) {
676    zygote_no_threads_ = val;
677  }
678
679  bool IsZygoteNoThreadSection() const {
680    return zygote_no_threads_;
681  }
682
683  // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
684  // optimization that makes it impossible to deoptimize.
685  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
686
687  // Returns a saved copy of the environment (getenv/setenv values).
688  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
689  char** GetEnvSnapshot() const {
690    return env_snapshot_.GetSnapshot();
691  }
692
693  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
694  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
695
696  void AttachAgent(JNIEnv* env,
697                   const std::string& agent_arg,
698                   jobject class_loader,
699                   bool allow_non_debuggable_tooling = false);
700
701  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
702    return agents_;
703  }
704
705  RuntimeCallbacks* GetRuntimeCallbacks();
706
707  bool HasLoadedPlugins() const {
708    return !plugins_.empty();
709  }
710
711  void InitThreadGroups(Thread* self);
712
713  void SetDumpGCPerformanceOnShutdown(bool value) {
714    dump_gc_performance_on_shutdown_ = value;
715  }
716
  // Bumps the counter for the given deoptimization kind; presumably surfaced
  // via DumpDeoptimizations() — confirm at call sites.
  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  // Sums the per-kind deoptimization counters (inclusive of kLast).
  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }
729
730  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
731  // This is beneficial for low RAM devices since it reduces page cache thrashing.
732  bool MAdviseRandomAccess() const {
733    return madvise_random_access_;
734  }
735
736  const std::string& GetJdwpOptions() {
737    return jdwp_options_;
738  }
739
740  JdwpProvider GetJdwpProvider() const {
741    return jdwp_provider_;
742  }
743
744  static constexpr int32_t kUnsetSdkVersion = 0u;
745
746 private:
747  static void InitPlatformSignalHandlers();
748
749  Runtime();
750
751  void BlockSignals();
752
753  bool Init(RuntimeArgumentMap&& runtime_options)
754      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
755  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
756  void RegisterRuntimeNativeMethods(JNIEnv* env);
757
758  void StartDaemonThreads();
759  void StartSignalCatcher();
760
761  void MaybeSaveJitProfilingInfo();
762
763  // Visit all of the thread roots.
764  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
765      REQUIRES_SHARED(Locks::mutator_lock_);
766
767  // Visit all other roots which must be done with mutators suspended.
768  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
769      REQUIRES_SHARED(Locks::mutator_lock_);
770
771  // Constant roots are the roots which never change after the runtime is initialized, they only
772  // need to be visited once per GC cycle.
773  void VisitConstantRoots(RootVisitor* visitor)
774      REQUIRES_SHARED(Locks::mutator_lock_);
775
776  // A pointer to the active runtime or null.
777  static Runtime* instance_;
778
779  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
780  static constexpr int kProfileForground = 0;
781  static constexpr int kProfileBackground = 1;
782
783  static constexpr uint32_t kCalleeSaveSize = 6u;
784
785  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
786  uint64_t callee_save_methods_[kCalleeSaveSize];
787  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
788  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
789  ArtMethod* resolution_method_;
790  ArtMethod* imt_conflict_method_;
791  // Unresolved method has the same behavior as the conflict method, it is used by the class linker
792  // for differentiating between unfilled imt slots vs conflict slots in superclasses.
793  ArtMethod* imt_unimplemented_method_;
794
795  // Special sentinel object used to invalid conditions in JNI (cleared weak references) and
796  // JDWP (invalid references).
797  GcRoot<mirror::Object> sentinel_;
798
799  InstructionSet instruction_set_;
800  QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];
801
802  CompilerCallbacks* compiler_callbacks_;
803  bool is_zygote_;
804  bool must_relocate_;
805  bool is_concurrent_gc_enabled_;
806  bool is_explicit_gc_disabled_;
807  bool dex2oat_enabled_;
808  bool image_dex2oat_enabled_;
809
810  std::string compiler_executable_;
811  std::string patchoat_executable_;
812  std::vector<std::string> compiler_options_;
813  std::vector<std::string> image_compiler_options_;
814  std::string image_location_;
815
816  std::string boot_class_path_string_;
817  std::string class_path_string_;
818  std::vector<std::string> properties_;
819
820  std::list<ti::AgentSpec> agent_specs_;
821  std::list<std::unique_ptr<ti::Agent>> agents_;
822  std::vector<Plugin> plugins_;
823
824  // The default stack size for managed threads created by the runtime.
825  size_t default_stack_size_;
826
827  gc::Heap* heap_;
828
829  std::unique_ptr<ArenaPool> jit_arena_pool_;
830  std::unique_ptr<ArenaPool> arena_pool_;
831  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
832  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
833  // since the field arrays are int arrays in this case.
834  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;
835
836  // Shared linear alloc for now.
837  std::unique_ptr<LinearAlloc> linear_alloc_;
838
839  // The number of spins that are done before thread suspension is used to forcibly inflate.
840  size_t max_spins_before_thin_lock_inflation_;
841  MonitorList* monitor_list_;
842  MonitorPool* monitor_pool_;
843
844  ThreadList* thread_list_;
845
846  InternTable* intern_table_;
847
848  ClassLinker* class_linker_;
849
850  SignalCatcher* signal_catcher_;
851
852  // If true, the runtime will connect to tombstoned via a socket to
853  // request an open file descriptor to write its traces to.
854  bool use_tombstoned_traces_;
855
856  // Location to which traces must be written on SIGQUIT. Only used if
857  // tombstoned_traces_ == false.
858  std::string stack_trace_file_;
859
860  std::unique_ptr<JavaVMExt> java_vm_;
861
862  std::unique_ptr<jit::Jit> jit_;
863  std::unique_ptr<jit::JitOptions> jit_options_;
864
865  // Fault message, printed when we get a SIGSEGV.
866  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
867  std::string fault_message_ GUARDED_BY(fault_message_lock_);
868
869  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
870  // the shutdown lock so that threads aren't born while we're shutting down.
871  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);
872
873  // Waited upon until no threads are being born.
874  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);
875
876  // Set when runtime shutdown is past the point that new threads may attach.
877  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);
878
879  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
880  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);
881
882  bool started_;
883
884  // New flag added which tells us if the runtime has finished starting. If
885  // this flag is set then the Daemon threads are created and the class loader
886  // is created. This flag is needed for knowing if its safe to request CMS.
887  bool finished_starting_;
888
889  // Hooks supported by JNI_CreateJavaVM
890  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
891  void (*exit_)(jint status);
892  void (*abort_)();
893
894  bool stats_enabled_;
895  RuntimeStats stats_;
896
897  const bool is_running_on_memory_tool_;
898
899  std::unique_ptr<TraceConfig> trace_config_;
900
901  instrumentation::Instrumentation instrumentation_;
902
903  jobject main_thread_group_;
904  jobject system_thread_group_;
905
906  // As returned by ClassLoader.getSystemClassLoader().
907  jobject system_class_loader_;
908
909  // If true, then we dump the GC cumulative timings on shutdown.
910  bool dump_gc_performance_on_shutdown_;
911
912  // Transactions used for pre-initializing classes at compilation time.
913  // Support nested transactions, maintain a list containing all transactions. Transactions are
914  // handled under a stack discipline. Because GC needs to go over all transactions, we choose list
915  // as substantial data structure instead of stack.
916  std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;
917
  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
  // available/usable.
  bool allow_dex_file_fallback_;

  // List of supported cpu abis.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat or patchoat don't need them. This enables
  // building a statically linked version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. The way it works is,
  // if standard dlopen fails to load native library associated with native activity, it calls to
  // the native bridge to load it and then gets the trampoline for the entry to native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under a native debugger.
  bool is_native_debuggable_;

  // Whether or not any async exceptions have ever been thrown. This is used to speed up the
  // MterpShouldSwitchInterpreters function.
  bool async_exceptions_thrown_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as public-usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;
975
  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager, keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool madvise_random_access_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether access checks on hidden API should be performed.
  bool do_hidden_api_checks_;

  // Whether the application has used an API which is not restricted but we
  // should issue a warning about it.
  bool pending_hidden_api_warning_;

  // Do not warn about the same hidden API access violation twice.
  // This is only used for testing.
  bool dedupe_hidden_api_warnings_;

  // Hidden API can print warnings into the log and/or set a flag read by the
  // framework to show a UI warning. If this flag is set, always set the flag
  // when there is a warning. This is only used for testing.
  bool always_set_hidden_api_warning_flag_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // The string containing the requested JDWP options.
  std::string jdwp_options_;

  // The JDWP provider we were configured with.
  JdwpProvider jdwp_provider_;
1025
  // Saved environment: a copy of the process environment taken when
  // TakeSnapshot() is called (methods are defined in runtime.cc).
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    // Captures the current environment into this object.
    void TakeSnapshot();
    // Returns the captured environment as a C-style char* vector.
    char** GetSnapshot() const;

   private:
    // Pointer array handed out by GetSnapshot(); the pointed-to characters
    // live in name_value_pairs_ below.
    // NOTE(review): ownership split inferred from the member types -- confirm
    // against the definitions in runtime.cc.
    std::unique_ptr<char*[]> c_env_vector_;
    // Owning storage for the copied environment entries.
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;
1039
  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  // Dispatcher for runtime event callbacks (see RuntimeCallbacks).
  std::unique_ptr<RuntimeCallbacks> callbacks_;

  // One counter per DeoptimizationKind, indexed by the enum's integral value;
  // kLast is the highest enumerator, hence the "+ 1" array size.
  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  // A dedicated mapped page used for fault handling.
  // NOTE(review): protection state and purpose inferred from the name --
  // confirm where the mapping is created in runtime.cc.
  std::unique_ptr<MemMap> protected_fault_page_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
1051};
1052
1053}  // namespace art
1054
1055#endif  // ART_RUNTIME_RUNTIME_H_
1056