mutex.h revision 90ef3db4bd1d4865f5f9cb95c8e7d9afb46994f9
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
31#if defined(__APPLE__)
32#define ART_USE_FUTEXES 0
33#else
34#define ART_USE_FUTEXES 1
35#endif
36
37// Currently Darwin doesn't support locks with timeouts.
38#if !defined(__APPLE__)
39#define HAVE_TIMED_RWLOCK 1
40#else
41#define HAVE_TIMED_RWLOCK 0
42#endif
43
44namespace art {
45
46class SHARED_LOCKABLE ReaderWriterMutex;
47class SHARED_LOCKABLE MutatorMutex;
48class ScopedContentionRecorder;
49class Thread;
50
// LockLevel is used to impose a lock hierarchy [1] in which acquiring a Mutex at a level higher
// than or equal to that of any lock the thread already holds is invalid. The lock hierarchy
// achieves a cycle-free partial ordering and thereby causes deadlock situations to fail checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  // Levels run from the lowest (acquired last, released first) to the highest
  // (acquired first). The ACQUIRED_AFTER annotations on the Locks members
  // below mirror this ordering. Do not reorder without auditing Locks.
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kLambdaTableLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kTransactionLogLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  // Level assigned to a Mutex constructed without an explicit level argument.
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
118
119const bool kDebugLocking = kIsDebugBuild;
120
121// Record Log contention information, dumpable via SIGQUIT.
122#ifdef ART_USE_FUTEXES
123// To enable lock contention logging, set this to true.
124const bool kLogLockContentions = false;
125#else
126// Keep this false as lock contention logging is supported only with
127// futex.
128const bool kLogLockContentions = false;
129#endif
130const size_t kContentionLogSize = 4;
131const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
132const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
133
134// Base class for all Mutex implementations
135class BaseMutex {
136 public:
137  const char* GetName() const {
138    return name_;
139  }
140
141  virtual bool IsMutex() const { return false; }
142  virtual bool IsReaderWriterMutex() const { return false; }
143  virtual bool IsMutatorMutex() const { return false; }
144
145  virtual void Dump(std::ostream& os) const = 0;
146
147  static void DumpAll(std::ostream& os);
148
149 protected:
150  friend class ConditionVariable;
151
152  BaseMutex(const char* name, LockLevel level);
153  virtual ~BaseMutex();
154  void RegisterAsLocked(Thread* self);
155  void RegisterAsUnlocked(Thread* self);
156  void CheckSafeToWait(Thread* self);
157
158  friend class ScopedContentionRecorder;
159
160  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
161  void DumpContention(std::ostream& os) const;
162
163  const LockLevel level_;  // Support for lock hierarchy.
164  const char* const name_;
165
166  // A log entry that records contention but makes no guarantee that either tid will be held live.
167  struct ContentionLogEntry {
168    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
169    uint64_t blocked_tid;
170    uint64_t owner_tid;
171    AtomicInteger count;
172  };
173  struct ContentionLogData {
174    ContentionLogEntry contention_log[kContentionLogSize];
175    // The next entry in the contention log to be updated. Value ranges from 0 to
176    // kContentionLogSize - 1.
177    AtomicInteger cur_content_log_entry;
178    // Number of times the Mutex has been contended.
179    AtomicInteger contention_count;
180    // Sum of time waited by all contenders in ns.
181    Atomic<uint64_t> wait_time;
182    void AddToWaitTime(uint64_t value);
183    ContentionLogData() : wait_time(0) {}
184  };
185  ContentionLogData contention_log_data_[kContentionLogDataSize];
186
187 public:
188  bool HasEverContended() const {
189    if (kLogLockContentions) {
190      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
191    }
192    return false;
193  }
194};
195
196// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
197// exclusive access to what it guards. A Mutex can be in one of two states:
198// - Free - not owned by any thread,
199// - Exclusive - owned by a single thread.
200//
201// The effect of locking and unlocking operations on the state is:
202// State     | ExclusiveLock | ExclusiveUnlock
203// -------------------------------------------
204// Free      | Exclusive     | error
205// Exclusive | Block*        | Free
206// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
207//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  // |recursive| permits the same thread to re-lock; see GetDepth().
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  // Checks are skipped while the runtime is aborting (gAborting != 0).
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  // Number of times the holder has locked; only meaningful when recursive_.
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
280
281// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
282// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
283// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
284// condition variable. A ReaderWriterMutex can be in one of three states:
285// - Free - not owned by any thread,
286// - Exclusive - owned by a single thread,
287// - Shared(n) - shared amongst n threads.
288//
289// The effect of locking and unlocking operations on the state is:
290//
291// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
292// ----------------------------------------------------------------------------
293// Free      | Exclusive     | error           | SharedLock(1)    | error
294// Exclusive | Block         | Free            | Block            | error
295// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
296// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire share of ReaderWriterMutex. Returns true on success.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  // Checks are skipped while the runtime is aborting (gAborting != 0).
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
402
403// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
404// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
405// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
406// held by any mutator threads. However, a thread in the kRunnable state is considered to have
407// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
408// state have associated implications on lock ownership. Extra methods to handle the state
409// transitions have been added to the interface but are only accessible to the methods dealing
410// with state transitions. The thread state and flags attributes are used to ensure thread state
411// transitions are consistent with the permitted behaviour of the mutex.
412//
413// *) The most important consequence of this behaviour is that all threads must be in one of the
414// suspended states before exclusive ownership of the mutator mutex is sought.
415//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  // Only Thread may drive the state-transition entry points below.
  friend class Thread;
  // Logically give up / take a shared hold of the mutator lock as part of a
  // thread state transition (see the class comment above for the ownership
  // model tied to the kRunnable state).
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};
435
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  // Returns true if the wait was signalled before the timeout expired —
  // TODO confirm return convention against mutex.cc.
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
473
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction (RAII).
class SCOPED_CAPABILITY MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
// (The function-like macro only fires on the single-argument, unnamed form.)
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
493
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction (RAII).
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() RELEASE() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
515
516// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
517// construction and releases it upon destruction.
518class SCOPED_CAPABILITY WriterMutexLock {
519 public:
520  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
521      self_(self), mu_(mu) {
522    mu_.ExclusiveLock(self_);
523  }
524
525  ~WriterMutexLock() UNLOCK_FUNCTION() {
526    mu_.ExclusiveUnlock(self_);
527  }
528
529 private:
530  Thread* const self_;
531  ReaderWriterMutex& mu_;
532  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
533};
534// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
535// "WriterMutexLock mu(lock)".
536#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
537
// For StartNoThreadSuspension and EndNoThreadSuspension.
// A no-op "capability" whose only purpose is to let annotalysis track
// acquisition/release of a role (it guards no data itself).
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  // For negative capabilities in clang annotations.
  const Role& operator!() const { return *this; }
};
545
// Role held while a thread may not become suspended; see Roles::uninterruptible_.
class Uninterruptible : public Role {
};
548
// Global mutexes corresponding to the levels above. Declarations run from the
// highest level (acquired first) down to the lowest, as expressed by the
// ACQUIRED_AFTER chain on each member.
class Locks {
 public:
  // Creates the global locks; presumably called once during runtime startup —
  // see mutex.cc.
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  // NOTE(review): both this and mem_maps_lock_ are annotated
  // ACQUIRED_AFTER(unexpected_signal_lock_), yet kLoggingLock < kMemMapsLock
  // in LockLevel; ACQUIRED_AFTER(mem_maps_lock_) may be intended — confirm.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
683
// Holder for the global role singletons (see Role above).
class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};
689
690}  // namespace art
691
692#endif  // ART_RUNTIME_BASE_MUTEX_H_
693