mutex.h revision e5f13e57ff8fa36342beb33830b3ec5942a61cca
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE ReaderWriterMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1] in which acquiring a Mutex at a level higher
// than or equal to that of a lock the thread already holds is invalid. The lock hierarchy yields
// a cycle-free partial ordering, and thereby causes would-be deadlocks to fail the checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kBreakpointInvokeLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
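
// A minimal sketch of the hierarchy in action (illustrative only, not part of the runtime; the
// locks and the Thread* self, e.g. from Thread::Current(), are hypothetical):
//
//   Mutex trace_lock("trace lock", kTraceLock);                // Higher level.
//   Mutex intern_lock("intern table lock", kInternTableLock);  // Lower level.
//
//   trace_lock.ExclusiveLock(self);
//   intern_lock.ExclusiveLock(self);   // OK: acquiring a strictly lower level.
//   intern_lock.ExclusiveUnlock(self);
//   trace_lock.ExclusiveUnlock(self);
//
//   intern_lock.ExclusiveLock(self);
//   trace_lock.ExclusiveLock(self);    // Fails the debug-build check: kTraceLock is not lower
//                                      // than the held kInternTableLock.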

const bool kDebugLocking = kIsDebugBuild;

// Records lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
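// Note: when kLogLockContentions is false the two sizes above are 0, so the contention arrays
// below (e.g. BaseMutex::contention_log_data_) are zero-length and cost no space per mutex.
// Zero-length arrays are a GCC/Clang extension rather than standard C++.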

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Returns true if it acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
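
// Illustrative use of a Mutex (a sketch; the lock and the Thread* self are hypothetical):
//
//   Mutex queue_lock("work queue lock");  // Uses kDefaultMutexLevel.
//
//   queue_lock.ExclusiveLock(self);
//   queue_lock.AssertHeld(self);
//   ... read or write the guarded state ...
//   queue_lock.ExclusiveUnlock(self);
//
// The scoped MutexLock helper declared later in this file is usually preferable, as it cannot
// leak the lock on an early return.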

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != NULL.
      CHECK(IsSharedHeld(self) || self == NULL) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
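
// Illustrative use of a ReaderWriterMutex (a sketch; the lock and the Thread* self are
// hypothetical):
//
//   ReaderWriterMutex map_lock("map lock");
//
//   map_lock.SharedLock(self);       // Many threads may hold shares concurrently.
//   ... read the guarded state ...
//   map_lock.SharedUnlock(self);
//
//   map_lock.ExclusiveLock(self);    // Blocks until all shares are released.
//   ... write the guarded state ...
//   map_lock.ExclusiveUnlock(self);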

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
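
// Illustrative wait/signal pattern (a sketch; the queue and names are hypothetical). As with
// pthread condition variables, a waiter re-checks its predicate in a loop: Wait() releases the
// guard while blocked, re-acquires it before returning, and wake-ups may be spurious:
//
//   Mutex queue_lock("queue lock");
//   ConditionVariable queue_cond("queue non-empty condition", queue_lock);
//
//   // Consumer.
//   queue_lock.ExclusiveLock(self);
//   while (queue.empty()) {
//     queue_cond.Wait(self);
//   }
//   ... pop from the queue ...
//   queue_lock.ExclusiveUnlock(self);
//
//   // Producer.
//   queue_lock.ExclusiveLock(self);
//   queue.push_back(item);
//   queue_cond.Signal(self);
//   queue_lock.ExclusiveUnlock(self);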

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
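
// Illustrative scoped usage (a sketch; Locks::breakpoint_lock_ is the ReaderWriterMutex declared
// below). Each helper releases its lock in the destructor, so an early return cannot leak it:
//
//   {
//     WriterMutexLock mu(self, *Locks::breakpoint_lock_);
//     ... install a breakpoint ...
//   }  // Exclusive access released here.
//
//   {
//     ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
//     ... inspect breakpoints ...
//   }  // Share released here.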

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
  // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_
  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
  // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
  // the mutator lock doesn't necessarily allow the exclusive user (e.g. the garbage collector)
  // a chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                  | Exclusive user
  // (holding mutator lock and in kRunnable state) |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended                    |   .. blocked ..
  // x: Release share on mutator_lock_             | Carry out exclusive access
  // Acquire thread_suspend_count_lock_            |   .. exclusive ..
  // while Thread::suspend_count_ > 0              |   .. exclusive ..
  //   - wait on Thread::resume_cond_              |   .. exclusive ..
  //     (releases thread_suspend_count_lock_)     |   .. exclusive ..
  //   .. waiting ..                               | Release mutator_lock_
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Acquire share on mutator_lock_                |  .. running ..
  //  - This could block but the thread still      |  .. running ..
  //    has a state of kSuspended and so this      |  .. running ..
  //    isn't an issue.                            |  .. running ..
  // Acquire thread_suspend_count_lock_            |  .. running ..
  //  - we poll here as we're transitioning into   |  .. running ..
  //    kRunnable and an individual thread suspend |  .. running ..
  //    request (e.g. for debugging) won't try     |  .. running ..
  //    to acquire the mutator lock (which would   |  .. running ..
  //    block as we hold the mutator lock). This   |  .. running ..
  //    poll ensures that if the suspender thought |  .. running ..
  //    we were suspended by incrementing our      |  .. running ..
  //    Thread::suspend_count_ and then reading    |  .. running ..
  //    our state we go back to waiting on         |  .. running ..
  //    Thread::resume_cond_.                      |  .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0 |  .. running ..
  // Release thread_suspend_count_lock_            |  .. running ..
  // if can_go_runnable                            |  .. running ..
  //   Change state to kRunnable                   |  .. running ..
  // else                                          |  .. running ..
  //   Goto x                                      |  .. running ..
  //  .. running ..                                |  .. running ..
  static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
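
  // For example (a sketch; the member name is hypothetical):
  //
  //   Mutex secondary_cache_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;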

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_