// mutex.h revision deaa1833e930ab8d8e0011b5267fcc2778a49099
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
31#if defined(__APPLE__)
32#define ART_USE_FUTEXES 0
33#else
34#define ART_USE_FUTEXES 1
35#endif
36
37// Currently Darwin doesn't support locks with timeouts.
38#if !defined(__APPLE__)
39#define HAVE_TIMED_RWLOCK 1
40#else
41#define HAVE_TIMED_RWLOCK 0
42#endif
43
44namespace art {
45
46class SHARED_LOCKABLE ReaderWriterMutex;
47class SHARED_LOCKABLE MutatorMutex;
48class ScopedContentionRecorder;
49class Thread;
50
51// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
52// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
// partial ordering and thereby causes deadlock situations to fail checks.
54//
55// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  // Declaration order is the level order: a thread may only acquire a mutex at
  // a level strictly below the levels of the locks it already holds (see the
  // hierarchy comment above). kLoggingLock is therefore acquired last,
  // kZygoteCreationLock first.
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kLambdaTableLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kTransactionLogLock,
  kMarkSweepMarkStackLock,
  kJniWeakGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  // Level used by Mutex/ReaderWriterMutex constructors when no explicit level
  // is given.
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
// Allows LockLevel values to be streamed into logs and CHECK messages.
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
119
120const bool kDebugLocking = kIsDebugBuild;
121
122// Record Log contention information, dumpable via SIGQUIT.
123#ifdef ART_USE_FUTEXES
124// To enable lock contention logging, set this to true.
125const bool kLogLockContentions = false;
126#else
127// Keep this false as lock contention logging is supported only with
128// futex.
129const bool kLogLockContentions = false;
130#endif
131const size_t kContentionLogSize = 4;
132const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
133const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
134
// Base class for all Mutex implementations (Mutex, ReaderWriterMutex,
// MutatorMutex). Holds the lock's name, its LockLevel used for hierarchy
// checking, and optional contention-logging storage.
class BaseMutex {
 public:
  // Returns the name supplied at construction; used in dumps and CHECK output.
  const char* GetName() const {
    return name_;
  }

  // Poor-man's RTTI: each concrete subclass overrides its own query to return
  // true.
  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  // Dumps the state of this mutex to |os|.
  virtual void Dump(std::ostream& os) const = 0;

  // Dumps the state of all mutexes to |os|.
  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  // Bookkeeping hooks used by subclasses to maintain the per-thread held-lock
  // state that backs the lock-hierarchy checks.
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  // Validates that it is safe for |self| to block while holding this mutex.
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  // Records that |blocked_tid| was blocked by |owner_tid| for
  // |nano_time_blocked| ns, feeding the SIGQUIT-dumpable contention log.
  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  // Zero-length (and thus storage-free) when kLogLockContentions is false.
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  // Whether this mutex has ever been contended. Always false when contention
  // logging is compiled out.
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
196
// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant (unless constructed with "recursive" set to true) and so an attempt
//   to ExclusiveLock on the same thread will result in an error. Being non-reentrant simplifies
//   Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  // |recursive| controls whether the same thread may hold the mutex more than
  // once (tracked via recursion_count_ below).
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  // Only checked when kDebugLocking and no abort is in progress.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
281
// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire share of ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  // Only checked when kDebugLocking and no abort is in progress.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
403
// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the methods dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  // Only Thread may drive the state-transition entry points below (see the
  // class comment: these model kRunnable transitions as shared lock/unlock).
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};
436
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  // |mutex| becomes this condition variable's guard_ (see below); waiters must
  // hold it.
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
474
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction (RAII: the constructor calls ExclusiveLock, the destructor
// ExclusiveUnlock).
class SCOPED_CAPABILITY MutexLock {
 public:
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
494
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction (RAII: the constructor calls
// SharedLock, the destructor SharedUnlock).
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() RELEASE() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
516
517// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
518// construction and releases it upon destruction.
519class SCOPED_CAPABILITY WriterMutexLock {
520 public:
521  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
522      self_(self), mu_(mu) {
523    mu_.ExclusiveLock(self_);
524  }
525
526  ~WriterMutexLock() UNLOCK_FUNCTION() {
527    mu_.ExclusiveUnlock(self_);
528  }
529
530 private:
531  Thread* const self_;
532  ReaderWriterMutex& mu_;
533  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
534};
535// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
536// "WriterMutexLock mu(lock)".
537#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
538
// For StartNoThreadSuspension and EndNoThreadSuspension.
// A Role is a capability for clang thread-safety analysis that is not backed
// by an actual lock: Acquire()/Release() are no-ops that only move the
// analysis state.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  // For negative capabilities in clang annotations.
  const Role& operator!() const { return *this; }
};

// Role held while the thread may not become suspended (see Roles below).
class Uninterruptible : public Role {
};
549
// Global mutexes corresponding to the levels above. Declaration order follows
// acquisition order: each lock is annotated ACQUIRED_AFTER the one before it
// so annotalysis can enforce the hierarchy.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  // NOTE(review): anchored on unexpected_signal_lock_, the same anchor as
  // mem_maps_lock_ above, even though kLoggingLock sits below kMemMapsLock in
  // LockLevel — confirm whether ACQUIRED_AFTER(mem_maps_lock_) was intended.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
684
// Well-known Role instances (see Role/Uninterruptible above).
class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};
690
691}  // namespace art
692
693#endif  // ART_RUNTIME_BASE_MUTEX_H_
694