mutex.h revision 59d9d668d4f4286813afe2b4e7c6db839222ce96
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
// ART's locks are built on futexes where available; futexes are unavailable on Darwin,
// so the pthread-based fallback implementations are used there instead.
#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif
43
44namespace art {
45
46class LOCKABLE ReaderWriterMutex;
47class ScopedContentionRecorder;
48class Thread;
49
// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
// partial ordering and thereby cause deadlock situations to fail checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  // Level 0 is the most "leaf" lock: per the rule above, no further lock may be acquired while
  // it is held. Higher enumerators are progressively closer to the root of the hierarchy, so
  // the relative order of the entries below is significant — do not reorder casually.
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpSocketLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kReferenceProcessorLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  // Default level for mutexes created without an explicit level (see Mutex's constructor).
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kLoadLibraryLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kThreadListLock,
  kBreakpointInvokeLock,
  kDeoptimizationLock,
  kTraceLock,
  kProfilerLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kHeapBitmapLock,
  kMutatorLock,
  kThreadListSuspendThreadLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
// Pretty printer for LockLevel values (used e.g. by CHECK failure messages).
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
103
104const bool kDebugLocking = kIsDebugBuild;
105
106// Record Log contention information, dumpable via SIGQUIT.
107#ifdef ART_USE_FUTEXES
108// To enable lock contention logging, set this to true.
109const bool kLogLockContentions = false;
110#else
111// Keep this false as lock contention logging is supported only with
112// futex.
113const bool kLogLockContentions = false;
114#endif
115const size_t kContentionLogSize = 4;
116const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
117const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
118
// Base class for all Mutex implementations
class BaseMutex {
 public:
  // Name supplied at construction; used in Dump output and diagnostics.
  const char* GetName() const {
    return name_;
  }

  // Run-time type queries for code that only holds a BaseMutex*.
  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  // Dump the state of this mutex to |os|.
  virtual void Dump(std::ostream& os) const = 0;

  // Dump the state of all mutexes to |os|.
  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  // Bookkeeping hooks invoked by subclasses on acquisition/release; presumably maintain the
  // per-thread held-locks state used for LockLevel hierarchy checking — TODO confirm.
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  // NOTE(review): presumably validates (under kDebugLocking) that blocking on this mutex
  // cannot violate the lock hierarchy — confirm against the implementation.
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  // Records that |blocked_tid| spent |nano_time_blocked| ns blocked on this mutex while
  // |owner_tid| held it; feeds the contention log below.
  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  // Zero-length when kLogLockContentions is false (kContentionLogDataSize == 0), so the
  // contention machinery then adds no per-mutex footprint.
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  // Whether this mutex was ever contended. Always false when contention logging is compiled
  // out; the zero-length-array access below is then dead code behind the constant condition.
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
179
// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  // Creates a mutex named |name| at |level| in the lock hierarchy. When |recursive| is true,
  // the lock may be recursively held (see recursion_count_ / GetDepth()).
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  // The check is elided in non-debug builds and while the runtime is aborting (gAborting != 0).
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
259
260// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
261// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
262// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
263// condition variable. A ReaderWriterMutex can be in one of three states:
264// - Free - not owned by any thread,
265// - Exclusive - owned by a single thread,
266// - Shared(n) - shared amongst n threads.
267//
268// The effect of locking and unlocking operations on the state is:
269//
270// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
271// ----------------------------------------------------------------------------
272// Free      | Exclusive     | error           | SharedLock(1)    | error
273// Exclusive | Block         | Free            | Block            | error
274// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
275// * for large values of n the SharedLock may block.
276std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
277class LOCKABLE ReaderWriterMutex : public BaseMutex {
278 public:
279  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
280  ~ReaderWriterMutex();
281
282  virtual bool IsReaderWriterMutex() const { return true; }
283
284  // Block until ReaderWriterMutex is free then acquire exclusive access.
285  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
286  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }
287
288  // Release exclusive access.
289  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
290  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }
291
292  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
293  // or false if timeout is reached.
294#if HAVE_TIMED_RWLOCK
295  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
296      EXCLUSIVE_TRYLOCK_FUNCTION(true);
297#endif
298
299  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
300  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
301  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }
302
303  // Try to acquire share of ReaderWriterMutex.
304  bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
305
306  // Release a share of the access.
307  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
308  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }
309
310  // Is the current thread the exclusive holder of the ReaderWriterMutex.
311  bool IsExclusiveHeld(const Thread* self) const;
312
313  // Assert the current thread has exclusive access to the ReaderWriterMutex.
314  void AssertExclusiveHeld(const Thread* self) {
315    if (kDebugLocking && (gAborting == 0)) {
316      CHECK(IsExclusiveHeld(self)) << *this;
317    }
318  }
319  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }
320
321  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
322  void AssertNotExclusiveHeld(const Thread* self) {
323    if (kDebugLocking && (gAborting == 0)) {
324      CHECK(!IsExclusiveHeld(self)) << *this;
325    }
326  }
327  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }
328
329  // Is the current thread a shared holder of the ReaderWriterMutex.
330  bool IsSharedHeld(const Thread* self) const;
331
332  // Assert the current thread has shared access to the ReaderWriterMutex.
333  void AssertSharedHeld(const Thread* self) {
334    if (kDebugLocking && (gAborting == 0)) {
335      // TODO: we can only assert this well when self != NULL.
336      CHECK(IsSharedHeld(self) || self == NULL) << *this;
337    }
338  }
339  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
340
341  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
342  // mode.
343  void AssertNotHeld(const Thread* self) {
344    if (kDebugLocking && (gAborting == 0)) {
345      CHECK(!IsSharedHeld(self)) << *this;
346    }
347  }
348
349  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
350  // than the owner.
351  uint64_t GetExclusiveOwnerTid() const;
352
353  virtual void Dump(std::ostream& os) const;
354
355 private:
356#if ART_USE_FUTEXES
357  // -1 implies held exclusive, +ve shared held by state_ many owners.
358  AtomicInteger state_;
359  // Exclusive owner. Modification guarded by this mutex.
360  volatile uint64_t exclusive_owner_;
361  // Number of contenders waiting for a reader share.
362  AtomicInteger num_pending_readers_;
363  // Number of contenders waiting to be the writer.
364  AtomicInteger num_pending_writers_;
365#else
366  pthread_rwlock_t rwlock_;
367  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
368#endif
369  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
370};
371
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  // Creates a condition variable named |name| associated with |mutex| (see guard_ below).
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  // Wake all waiters / one waiter; |self| is the calling thread.
  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  void TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
409
410// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
411// upon destruction.
412class SCOPED_LOCKABLE MutexLock {
413 public:
414  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
415    mu_.ExclusiveLock(self_);
416  }
417
418  ~MutexLock() UNLOCK_FUNCTION() {
419    mu_.ExclusiveUnlock(self_);
420  }
421
422 private:
423  Thread* const self_;
424  Mutex& mu_;
425  DISALLOW_COPY_AND_ASSIGN(MutexLock);
426};
427// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
428#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name)
429
430// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
431// construction and releases it upon destruction.
432class SCOPED_LOCKABLE ReaderMutexLock {
433 public:
434  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
435      self_(self), mu_(mu) {
436    mu_.SharedLock(self_);
437  }
438
439  ~ReaderMutexLock() UNLOCK_FUNCTION() {
440    mu_.SharedUnlock(self_);
441  }
442
443 private:
444  Thread* const self_;
445  ReaderWriterMutex& mu_;
446  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
447};
448// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
449// "ReaderMutexLock mu(lock)".
450#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name)
451
452// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
453// construction and releases it upon destruction.
454class SCOPED_LOCKABLE WriterMutexLock {
455 public:
456  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
457      self_(self), mu_(mu) {
458    mu_.ExclusiveLock(self_);
459  }
460
461  ~WriterMutexLock() UNLOCK_FUNCTION() {
462    mu_.ExclusiveUnlock(self_);
463  }
464
465 private:
466  Thread* const self_;
467  ReaderWriterMutex& mu_;
468  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
469};
470// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
471// "WriterMutexLock mu(lock)".
472#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)
473
// Global mutexes corresponding to the levels above.
class Locks {
 public:
  // Initializes the global locks declared below.
  static void Init();

  // There's a potential race where two threads try to suspend each other, both succeed, and
  // both block while becoming runnable. This lock ensures that only one thread is requesting
  // suspension of another at any time. As the thread-list suspend-thread logic transitions to
  // runnable, if the current thread has a pending suspension request this thread blocks while
  // holding this lock until it can safely request suspension of the other thread, i.e. without
  // that thread having a suspension request against this thread. This avoids a potential
  // deadlock cycle.
  static Mutex* thread_list_suspend_thread_lock_;

  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
  // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_
  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
  // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
  // the mutator lock doesn't necessarily allow the exclusive user (e.g the garbage collector)
  // chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                  | Exclusive user
  // (holding mutator lock and in kRunnable state) |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended                    |   .. blocked ..
  // x: Release share on mutator_lock_             | Carry out exclusive access
  // Acquire thread_suspend_count_lock_            |   .. exclusive ..
  // while Thread::suspend_count_ > 0              |   .. exclusive ..
  //   - wait on Thread::resume_cond_              |   .. exclusive ..
  //     (releases thread_suspend_count_lock_)     |   .. exclusive ..
  //   .. waiting ..                               | Release mutator_lock_
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Acquire share on mutator_lock_                |  .. running ..
  //  - This could block but the thread still      |  .. running ..
  //    has a state of kSuspended and so this      |  .. running ..
  //    isn't an issue.                            |  .. running ..
  // Acquire thread_suspend_count_lock_            |  .. running ..
  //  - we poll here as we're transitioning into   |  .. running ..
  //    kRunnable and an individual thread suspend |  .. running ..
  //    request (e.g for debugging) won't try      |  .. running ..
  //    to acquire the mutator lock (which would   |  .. running ..
  //    block as we hold the mutator lock). This   |  .. running ..
  //    poll ensures that if the suspender thought |  .. running ..
  //    we were suspended by incrementing our      |  .. running ..
  //    Thread::suspend_count_ and then reading    |  .. running ..
  //    our state we go back to waiting on         |  .. running ..
  //    Thread::resume_cond_.                      |  .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0 |  .. running ..
  // Release thread_suspend_count_lock_            |  .. running ..
  // if can_go_runnable                            |  .. running ..
  //   Change state to kRunnable                   |  .. running ..
  // else                                          |  .. running ..
  //   Goto x                                      |  .. running ..
  //  .. running ..                                |  .. running ..
  static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(thread_list_suspend_thread_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  // NOTE(review): both mem_maps_lock_ and logging_lock_ are annotated
  // ACQUIRED_AFTER(unexpected_signal_lock_), yet in LockLevel kLoggingLock (0) sits below
  // kMemMapsLock — confirm whether this should be ACQUIRED_AFTER(mem_maps_lock_).
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};
601
602}  // namespace art
603
604#endif  // ART_RUNTIME_BASE_MUTEX_H_
605