// mutex.h, revision 2cebb24bfc3247d3e9be138a3350106737455918.
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE ReaderWriterMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1]: acquiring a Mutex whose level is higher than
// or equal to that of a lock the thread already holds is invalid. The hierarchy yields a
// cycle-free partial ordering on lock acquisition, and thereby causes would-be deadlocks to fail
// the checks instead.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
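//
// A minimal sketch of the hierarchy in use, with levels from the enum below ("outer_lock",
// "leaf_lock" and "self" are hypothetical names, not declared in this file):
//
//   Mutex outer_lock("outer", kThreadListLock);
//   Mutex leaf_lock("leaf", kAbortLock);  // kAbortLock is a lower level than kThreadListLock.
//
//   outer_lock.ExclusiveLock(self);  // OK: no lock held yet.
//   leaf_lock.ExclusiveLock(self);   // OK: strictly lower level than the held outer_lock.
//   leaf_lock.ExclusiveUnlock(self);
//   outer_lock.ExclusiveUnlock(self);
//
// Acquiring outer_lock while holding leaf_lock would fail the debug-build checks, since
// outer_lock's level is higher than that of a lock already held.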
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kBreakpointInvokeLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * A Mutex is not reentrant (unless constructed with recursive == true), so an attempt to
//   ExclusiveLock again on the owning thread is an error. Being non-reentrant simplifies
//   Waiting on ConditionVariables.
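//
// A minimal usage sketch ("data_lock" and "self" are hypothetical names, not declared in this
// file):
//
//   Mutex data_lock("data lock");         // Defaults to kDefaultMutexLevel.
//   data_lock.ExclusiveLock(self);
//   data_lock.AssertExclusiveHeld(self);  // Debug-build sanity check.
//   ...                                   // Exclusive access to the guarded state.
//   data_lock.ExclusiveUnlock(self);
//
// Prefer the scoped MutexLock helper declared below, which pairs the lock with its unlock
// automatically.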
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Returns true if exclusive access is acquired, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A drawback relative to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
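//
// A minimal usage sketch ("map_lock" and "self" are hypothetical names, not declared in this
// file):
//
//   ReaderWriterMutex map_lock("map lock");
//   map_lock.SharedLock(self);       // Any number of readers may hold a share at once.
//   ...                              // Read the guarded state.
//   map_lock.SharedUnlock(self);
//
//   map_lock.ExclusiveLock(self);    // A writer excludes all readers and other writers.
//   ...                              // Mutate the guarded state.
//   map_lock.ExclusiveUnlock(self);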
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusively; a positive value means shared, held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
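
// A minimal sketch of the usual wait-loop idiom ("lock_", "cond_", "ready_" and "self" are
// hypothetical names, not declared in this file). Wait must be called with guard_ exclusively
// held and re-acquires it before returning, so the predicate is always re-checked:
//
//   Mutex lock_("ready lock");
//   ConditionVariable cond_("ready condition", lock_);
//   bool ready_ = false;        // Guarded by lock_.
//
//   // Waiter:
//   lock_.ExclusiveLock(self);
//   while (!ready_) {
//     cond_.Wait(self);         // Releases lock_ while blocked, re-acquires it on wake-up.
//   }
//   lock_.ExclusiveUnlock(self);
//
//   // Signaller:
//   lock_.ExclusiveLock(self);
//   ready_ = true;
//   cond_.Signal(self);
//   lock_.ExclusiveUnlock(self);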

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
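//
// A minimal usage sketch ("data_lock" and "self" are hypothetical names, not declared in this
// file):
//
//   {
//     MutexLock mu(self, data_lock);  // Acquires data_lock.
//     ...                             // Exclusive access to the guarded state.
//   }                                 // Destructor releases data_lock, even on early exit.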

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
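//
// A minimal usage sketch for the two ReaderWriterMutex helpers ("map_lock" and "self" are
// hypothetical names, not declared in this file):
//
//   {
//     ReaderMutexLock mu(self, map_lock);  // Shared (reader) access for the scope.
//     ...                                  // Read the guarded state.
//   }
//   {
//     WriterMutexLock mu(self, map_lock);  // Exclusive (writer) access for the scope.
//     ...                                  // Mutate the guarded state.
//   }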

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
  // the mutators to suspend themselves, which also involves usage of the thread_suspend_count_lock_
  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
  // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
  // the mutator lock doesn't necessarily allow the exclusive user (e.g. the garbage collector)
  // a chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                  | Exclusive user
  // (holding mutator lock and in kRunnable state) |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended                    |   .. blocked ..
  // x: Release share on mutator_lock_             | Carry out exclusive access
  // Acquire thread_suspend_count_lock_            |   .. exclusive ..
  // while Thread::suspend_count_ > 0              |   .. exclusive ..
  //   - wait on Thread::resume_cond_              |   .. exclusive ..
  //     (releases thread_suspend_count_lock_)     |   .. exclusive ..
  //   .. waiting ..                               | Release mutator_lock_
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Acquire share on mutator_lock_                |  .. running ..
  //  - This could block but the thread still      |  .. running ..
  //    has a state of kSuspended and so this      |  .. running ..
  //    isn't an issue.                            |  .. running ..
  // Acquire thread_suspend_count_lock_            |  .. running ..
  //  - we poll here as we're transitioning into   |  .. running ..
  //    kRunnable and an individual thread suspend |  .. running ..
  //    request (e.g. for debugging) won't try     |  .. running ..
  //    to acquire the mutator lock (which would   |  .. running ..
  //    block as we hold the mutator lock). This   |  .. running ..
  //    poll ensures that if the suspender thought |  .. running ..
  //    we were suspended by incrementing our      |  .. running ..
  //    Thread::suspend_count_ and then reading    |  .. running ..
  //    our state we go back to waiting on         |  .. running ..
  //    Thread::resume_cond_.                      |  .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0 |  .. running ..
  // Release thread_suspend_count_lock_            |  .. running ..
  // if can_go_runnable                            |  .. running ..
  //   Change state to kRunnable                   |  .. running ..
  // else                                          |  .. running ..
  //   Goto x                                      |  .. running ..
  //  .. running ..                                |  .. running ..
  static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
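  //
  // A minimal sketch ("Foo" and "my_lock_" are hypothetical names, not declared in this file):
  //
  //   class Foo {
  //     ...
  //     Mutex my_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;  // Named and constructed in Foo's
  //                                                   // constructor initializer list.
  //   };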

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_