mutex.h revision 88fd720b6799184c8ad61e766a6d37af33ed30ef
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>
#include <vector>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE Mutex;
class SHARED_LOCKABLE ReaderWriterMutex;
class SHARED_LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;
// LockLevel is used to impose a lock hierarchy [1] in which acquiring a Mutex at a level higher
// than or equal to any lock the thread already holds is invalid. The lock hierarchy imposes a
// cycle-free partial ordering, causing would-be deadlock situations to fail debug checks instead.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
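//
// For example (an illustrative sketch; 'self' stands for the current Thread*):
//
//   Mutex leaf("leaf lock", kLoggingLock);
//   Mutex outer("outer lock", kDefaultMutexLevel);
//   outer.ExclusiveLock(self);
//   leaf.ExclusiveLock(self);   // OK: kLoggingLock is below kDefaultMutexLevel.
//   // Acquiring another mutex at kDefaultMutexLevel or above here would fail
//   // the hierarchy check in debug builds.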
enum LockLevel {
  kLoggingLock = 0,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kMarkSweepMarkStackLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kTaggingLockLevel,
  kTransactionLogLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDeoptimizedMethodsLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO: rename.
  kJitCodeCacheLock,
  kCHALock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kUserCodeSuspensionLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false, as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

  bool ShouldRespondToEmptyCheckpointRequest() const {
    return should_respond_to_empty_checkpoint_request_;
  }

  void SetShouldRespondToEmptyCheckpointRequest(bool value) {
    should_respond_to_empty_checkpoint_request_ = value;
  }

  virtual void WakeupToRespondToEmptyCheckpoint() = 0;

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;
  bool should_respond_to_empty_checkpoint_request_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * A Mutex is not reentrant by default, so an attempt to ExclusiveLock on the same thread will
//   result in an error. Being non-reentrant simplifies Waiting on ConditionVariables.
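//
// A minimal usage sketch (illustrative only; 'self' stands for the current
// Thread*):
//
//   Mutex lock("example lock");
//   lock.ExclusiveLock(self);    // Free -> Exclusive; blocks while held elsewhere.
//   ...                          // Critical section.
//   lock.ExclusiveUnlock(self);  // Exclusive -> Free.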
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); }
  // Returns true if exclusive access is acquired, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
  ALWAYS_INLINE void AssertHeld(const Thread* self) const ASSERT_CAPABILITY(this);

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A limitation in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock    | SharedUnlock
// --------------------------------------------------------------------------
// Free      | Exclusive     | error           | Shared(1)     | error
// Exclusive | Block         | Free            | Block         | error
// Shared(n) | Block         | error           | Shared(n+1)*  | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
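//
// A minimal usage sketch (illustrative only; 'self' stands for the current
// Thread*):
//
//   ReaderWriterMutex rw("example rw lock");
//   rw.SharedLock(self);        // Free or Shared(n) -> Shared(n+1).
//   ...                         // Read-only use of the guarded state.
//   rw.SharedUnlock(self);
//   rw.ExclusiveLock(self);     // Blocks until all shares are released.
//   ...                         // Mutating use of the guarded state.
//   rw.ExclusiveUnlock(self);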
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share of the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
  ALWAYS_INLINE void AssertWriterHeld(const Thread* self) const ASSERT_CAPABILITY(this);

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  ALWAYS_INLINE void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  ALWAYS_INLINE void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  ALWAYS_INLINE void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // Out-of-line path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive; a positive value is the number of shared owners (state_).
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the code dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
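// For example (an illustrative sketch; ScopedObjectAccess is a runtime helper
// that performs these transitions, mentioned here for context rather than
// defined in this file):
//
//   ScopedObjectAccess soa(self);  // kSuspended -> kRunnable: logically
//                                  // acquires a share of Locks::mutator_lock_.
//   ...                            // Safe to touch managed heap objects here.
//                                  // Destructor: kRunnable -> kSuspended,
//                                  // logically releasing the share.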
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
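//
// A typical wait loop (illustrative only; 'lock' stands for the Mutex passed to
// the ConditionVariable's constructor, 'ready' for any predicate on state that
// the Mutex guards):
//
//   MutexLock mu(self, lock);
//   while (!ready) {
//     cond.Wait(self);  // Releases 'lock' while blocked and reacquires it
//   }                   // before returning, so 'ready' is rechecked under it.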
class ConditionVariable {
 public:
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_CAPABILITY MutexLock {
 public:
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
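
// Example (an illustrative sketch; thread_list_lock_ is declared below): the
// scoped form replaces paired ExclusiveLock / ExclusiveUnlock calls and
// releases on every path out of the scope:
//
//   {
//     MutexLock mu(self, *Locks::thread_list_lock_);
//     ...  // thread_list_lock_ is held here.
//   }      // Released automatically, even on early return.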

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  ALWAYS_INLINE ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu);

  ALWAYS_INLINE ~ReaderMutexLock() RELEASE();

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY WriterMutexLock {
 public:
  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
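
// Example (an illustrative sketch; breakpoint_lock_ is declared below) for the
// reader/writer scoped forms:
//
//   {
//     ReaderMutexLock mu(self, *Locks::breakpoint_lock_);  // Shared access.
//   }
//   {
//     WriterMutexLock mu(self, *Locks::breakpoint_lock_);  // Exclusive access.
//   }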

// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  const Role& operator!() const { return *this; }
};

class Uninterruptible : public Role {
};

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.

  // Destroying various lock types can emit errors that vary depending upon
  // whether the client (art::Runtime) is currently active.  Allow the client
  // to set a callback that is used to check when it is acceptable to call
  // Abort.  The default behavior is that the client *is not* able to call
  // Abort if no callback is established.
  using ClientCallback = bool();
  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
  // Checks for whether it is safe to call Abort() without using locks.
  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;

  // Add a mutex to expected_mutexes_on_weak_ref_access_.
  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
  // only if the suspension is not due to SuspendReason::kForUserCode.
  static Mutex* user_code_suspension_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will get their barrier passed by
  // the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up when all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e., traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards Class Hierarchy Analysis (CHA).
  static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(cha_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex, add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the
  // code doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
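  //
  // For example (an illustrative sketch; 'my_subsystem_lock_' is a
  // hypothetical name, not a mutex declared in this file):
  //
  //   static Mutex* my_subsystem_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;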

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guards the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);

  // Guards extra string entries for VerifierDeps.
  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);

  // Guards the intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards the reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards the cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards the weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards the finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards the phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards the soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Guards accesses to the JNI Global Reference table.
  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Guards accesses to the JNI Weak Global Reference table.
  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);

  // Guards accesses to the JNI function table override.
  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);

  // Ensures there is at most one aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);

  // Allows mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // Ensures only one unexpected signal is handled at a time.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Ensures there is at most one logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
  // encounter an unexpected mutex on accessing weak refs,
  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
  class ScopedExpectedMutexesOnWeakRefAccessLock;
};

class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_