mutex.h revision e2facc5b18cd756a8b5500fb3d90da69c9ee0fb7
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE Mutex;
class LOCKABLE ReaderWriterMutex;
class LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1]: it is invalid for a thread to acquire a
// Mutex at a level higher than or equal to that of any lock it already holds. The hierarchy
// imposes a cycle-free partial ordering on lock acquisition, so potential deadlocks fail the
// debug-build checks instead of hanging.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kLambdaTableLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kTransactionLogLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);

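// For example (an illustrative sketch; the mutexes and the Thread* self below are hypothetical):
// while holding a kDefaultMutexLevel mutex, acquiring a lower-level mutex is valid, but acquiring
// one at an equal or higher level fails the debug-build hierarchy check:
//
//   Mutex work_mu("work");                     // Defaults to kDefaultMutexLevel.
//   Mutex alloc_mu("alloc", kAllocSpaceLock);  // A lower level than kDefaultMutexLevel.
//   Mutex other_mu("other", kDefaultMutexLevel);
//   work_mu.ExclusiveLock(self);
//   alloc_mu.ExclusiveLock(self);   // OK: kAllocSpaceLock < kDefaultMutexLevel.
//   other_mu.ExclusiveLock(self);   // Hierarchy violation: equal level while work_mu is held.
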
const bool kDebugLocking = kIsDebugBuild;

// Records lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false, as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until the mutex is free, then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Returns true if it acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex?
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with the exclusive owner. No memory ordering semantics if called from a thread
  // other than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

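// Example usage (an illustrative sketch; the lock name and the Thread* are hypothetical).
// Prefer the scoped MutexLock helper declared below, which releases the lock on every path:
//
//   Mutex queue_lock("queue lock");
//   Thread* self = Thread::Current();
//   queue_lock.ExclusiveLock(self);    // Blocks while another thread holds the lock.
//   ...                                // Mutate the state guarded by queue_lock.
//   queue_lock.ExclusiveUnlock(self);  // Returns the Mutex to the Free state.
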
// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A limitation relative to a Mutex is that it cannot be used with a
// ConditionVariable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until the ReaderWriterMutex is free, then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Block until the ReaderWriterMutex is free and acquire exclusive access. Returns true on
  // success or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until the ReaderWriterMutex is shared or free, then acquire a share on the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex?
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex?
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with the exclusive owner. No memory ordering semantics if called from a thread
  // other than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

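// Example usage (an illustrative sketch; the names are hypothetical). Readers may overlap; a
// writer excludes readers and other writers. Prefer the scoped ReaderMutexLock/WriterMutexLock
// helpers declared below:
//
//   ReaderWriterMutex map_lock("map lock");
//   map_lock.SharedLock(self);       // One of many concurrent readers.
//   ...                              // Read the guarded map.
//   map_lock.SharedUnlock(self);
//
//   map_lock.ExclusiveLock(self);    // Sole writer: blocks until all shares are released.
//   ...                              // Update the guarded map.
//   map_lock.ExclusiveUnlock(self);
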
// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the methods dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

 private:
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};

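// Illustrative sketch (assumes ART's ScopedObjectAccess from scoped_thread_state_change.h, which
// is not declared in this header): mutator threads do not lock mutator_lock_ directly; shared
// ownership follows the thread state, typically via a scoped helper:
//
//   Thread* self = Thread::Current();
//   ScopedObjectAccess soa(self);  // Transition to kRunnable: self now shares the mutator lock.
//   ...                            // Safe to access managed heap objects here.
//                                  // soa's destructor transitions back, releasing the share.
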
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};

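// Example of the usual guarded-wait pattern (an illustrative sketch; queue_lock, queue_ready and
// the Thread* self are hypothetical):
//
//   Mutex queue_lock("queue lock");
//   ConditionVariable queue_cond("queue condition", queue_lock);
//   bool queue_ready = false;  // Guarded by queue_lock.
//
//   // Waiter (re-checks the predicate in a loop to tolerate spurious wake-ups):
//   queue_lock.ExclusiveLock(self);
//   while (!queue_ready) {
//     queue_cond.Wait(self);  // Releases queue_lock while waiting, re-acquires it on wake-up.
//   }
//   queue_lock.ExclusiveUnlock(self);
//
//   // Signaller:
//   queue_lock.ExclusiveLock(self);
//   queue_ready = true;
//   queue_cond.Signal(self);
//   queue_lock.ExclusiveUnlock(self);
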
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where the variable name is omitted: "MutexLock (lock);" instead of
// "MutexLock mu(lock);". The function-like macro only expands when the next token after
// MutexLock is "(", so correctly named declarations are unaffected.
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")

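// Example usage (an illustrative sketch; the function and lock are hypothetical):
//
//   void AppendWork(Thread* self, Work* work) {
//     MutexLock mu(self, *work_queue_lock);  // Acquired here.
//     ...                                    // Mutate the guarded queue.
//   }                                        // mu's destructor releases the lock on every path.
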
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where the variable name is omitted: "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock);".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where the variable name is omitted: "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock);".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")

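// Example usage of the reader/writer scoped lockers (an illustrative sketch; the lock choice is
// hypothetical):
//
//   {
//     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
//     ...  // Multiple threads may hold a share and read concurrently.
//   }      // Share released.
//   {
//     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
//     ...  // Exclusive access for updates.
//   }      // Exclusive access released.
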
// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will get their barrier passed
  // by the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up once all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};

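// Illustrative sketch of hierarchy-respecting acquisition of the global locks (the scenario is
// hypothetical): locks are taken top-down, from higher levels to lower ones:
//
//   Thread* self = Thread::Current();
//   MutexLock tl(self, *Locks::thread_list_lock_);           // kThreadListLock.
//   MutexLock sc(self, *Locks::thread_suspend_count_lock_);  // Lower level: OK.
//   // Acquiring these in the reverse order would fail the debug-build hierarchy checks.
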
}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_