// mutex.h — ART runtime. Snapshot of revision 4e2cb098017bf073335ebb02b1bc0a36828cd720.
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
31#if defined(__APPLE__)
32#define ART_USE_FUTEXES 0
33#else
34#define ART_USE_FUTEXES 1
35#endif
36
37// Currently Darwin doesn't support locks with timeouts.
38#if !defined(__APPLE__)
39#define HAVE_TIMED_RWLOCK 1
40#else
41#define HAVE_TIMED_RWLOCK 0
42#endif
43
44namespace art {
45
46class SHARED_LOCKABLE ReaderWriterMutex;
47class SHARED_LOCKABLE MutatorMutex;
48class ScopedContentionRecorder;
49class Thread;
50
// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
// partial ordering and thereby cause deadlock situations to fail checks.
//
// Levels run from the most deeply nested locks (lowest values, starting at kLoggingLock = 0) to
// the outermost locks (highest values): a thread may only acquire a lock at a strictly lower
// level than any lock it already holds.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kLambdaTableLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kTransactionLogLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
118
// Enables the lock-hierarchy and lock-held CHECKs (see the Assert* methods below); follows the
// build type so the checks are compiled out of release builds.
const bool kDebugLocking = kIsDebugBuild;
120
// Record Log contention information, dumpable via SIGQUIT.
//
// NOTE: ART_USE_FUTEXES is always *defined* (to 0 or 1) at the top of this file, so testing it
// with #ifdef would always take the first branch; use #if, as the rest of this file does.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futex.
const bool kLogLockContentions = false;
#endif
// Number of most-recent contention events retained per mutex for SIGQUIT dumps.
const size_t kContentionLogSize = 4;
// Sizes collapse to zero when logging is disabled so BaseMutex carries no per-mutex overhead.
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
133
// Base class for all Mutex implementations. Holds the name, the lock-hierarchy level and the
// (optionally compiled-in) contention log shared by Mutex, ReaderWriterMutex and MutatorMutex.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  // Runtime type queries, overridden by the concrete subclasses.
  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  // Dumps diagnostic state for mutexes; implemented out of line.
  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  // Hooks for the lock-hierarchy bookkeeping: record this mutex as held/not-held by |self|.
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  // Checks that it is safe for |self| to block (e.g. on a condition variable) while holding this.
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  // Contention logging; only meaningful when kLogLockContentions is true.
  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  // Zero-length array when contention logging is compiled out (kContentionLogDataSize is 0), so
  // no space is used; only dereferenced behind kLogLockContentions checks (see HasEverContended).
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  // Whether this mutex has ever been contended; always false when logging is compiled out.
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
195
// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant by default (unless constructed with recursive == true) and so an
//   attempt to ExclusiveLock on the same thread will result in an error. Being non-reentrant
//   simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  // No-op unless kDebugLocking; suppressed while the runtime is aborting (gAborting != 0).
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
280
// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
  // Only compiled where HAVE_TIMED_RWLOCK is set (not on Darwin, see top of file).
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire share of ReaderWriterMutex; returns true on success.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  // Like Mutex, all asserts below are no-ops unless kDebugLocking and not aborting.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
398
// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the methods dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  // Only Thread may drive the state transitions described in the class comment: leaving
  // kRunnable drops the implicit shared hold, entering kRunnable acquires it.
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};
431
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast). A ConditionVariable is permanently associated with one
// Mutex (guard_); it is an error to mix condition variables between different Mutexes.
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  // Wake all waiters (Broadcast) or a single waiter (Signal).
  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  // Timed wait; |ms|/|ns| form the relative timeout.
  // NOTE(review): the meaning of the bool result is not documented here — confirm against the
  // definition in mutex.cc before relying on it.
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come into to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
469
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction (RAII-style exclusive hold for the lifetime of the scope).
class SCOPED_CAPABILITY MutexLock {
 public:
  // Blocks until exclusive access to |mu| is acquired on behalf of |self|.
  explicit MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
489
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  // Blocks until a shared (reader) hold on |mu| is acquired on behalf of |self|.
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() RELEASE() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
511
512// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
513// construction and releases it upon destruction.
514class SCOPED_CAPABILITY WriterMutexLock {
515 public:
516  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
517      self_(self), mu_(mu) {
518    mu_.ExclusiveLock(self_);
519  }
520
521  ~WriterMutexLock() UNLOCK_FUNCTION() {
522    mu_.ExclusiveUnlock(self_);
523  }
524
525 private:
526  Thread* const self_;
527  ReaderWriterMutex& mu_;
528  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
529};
530// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
531// "WriterMutexLock mu(lock)".
532#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
533
// For StartNoThreadSuspension and EndNoThreadSuspension.
// A pure clang thread-safety-analysis "capability": Acquire/Release are no-ops at runtime and
// exist only so code paths can be annotated as holding the role.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  // For negative capabilities in clang annotations.
  const Role& operator!() const { return *this; }
};
541
// Role tag used with StartNoThreadSuspension/EndNoThreadSuspension (see Role above and
// Roles::uninterruptible_ below).
class Uninterruptible : public Role {
};
544
// Global mutexes corresponding to the levels above. The ACQUIRED_AFTER annotations mirror the
// LockLevel ordering so annotalysis can check acquisition order statically.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
  // thread; threads in the runnable state will pass the barrier when they transition to the
  // suspended state. GC/Debugger thread will be woken up when all mutator threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  // NOTE(review): anchored on unexpected_signal_lock_, the same anchor as mem_maps_lock_ above,
  // yet kLoggingLock sits below kMemMapsLock in LockLevel — confirm whether this should be
  // ACQUIRED_AFTER(mem_maps_lock_) instead.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
679
// Global role instances (see Role / Uninterruptible above).
class Roles {
 public:
  // The role used with StartNoThreadSuspension/EndNoThreadSuspension annotations.
  static Uninterruptible uninterruptible_;
};
684
685}  // namespace art
686
687#endif  // ART_RUNTIME_BASE_MUTEX_H_
688