// mutex.h revision 457e874459ae638145cab6d572e34d48480e39d2
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
31#if defined(__APPLE__)
32#define ART_USE_FUTEXES 0
33#else
34#define ART_USE_FUTEXES 1
35#endif
36
37// Currently Darwin doesn't support locks with timeouts.
38#if !defined(__APPLE__)
39#define HAVE_TIMED_RWLOCK 1
40#else
41#define HAVE_TIMED_RWLOCK 0
42#endif
43
44namespace art {
45
46class SHARED_LOCKABLE ReaderWriterMutex;
47class SHARED_LOCKABLE MutatorMutex;
48class ScopedContentionRecorder;
49class Thread;
50
51// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
52// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
// partial ordering and thereby causes deadlock situations to fail checks.
54//
55// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
// Levels are listed from lowest (acquired last, innermost) to highest (acquired first,
// outermost). While holding a lock at level X a thread may only acquire locks at levels
// strictly below X; see the hierarchy comment above. Do not reorder entries without
// auditing every ACQUIRED_AFTER annotation in Locks below.
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kLambdaClassTableLock,
  kLambdaTableLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kTransactionLogLock,
  kMarkSweepMarkStackLock,
  kJniWeakGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kOatFileCountLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  // Level used by Mutex/ReaderWriterMutex constructors when no explicit level is given.
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kInterpreterStringInitMapLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
// Pretty-printer for LockLevel, used in diagnostics.
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
123
124const bool kDebugLocking = kIsDebugBuild;
125
126// Record Log contention information, dumpable via SIGQUIT.
127#ifdef ART_USE_FUTEXES
128// To enable lock contention logging, set this to true.
129const bool kLogLockContentions = false;
130#else
131// Keep this false as lock contention logging is supported only with
132// futex.
133const bool kLogLockContentions = false;
134#endif
135const size_t kContentionLogSize = 4;
136const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
137const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
138
139// Base class for all Mutex implementations
// Base class for all Mutex implementations: carries the name, the lock-hierarchy level and
// the (optionally compiled-out) contention log shared by Mutex, ReaderWriterMutex and
// MutatorMutex.
class BaseMutex {
 public:
  // Human-readable name given at construction; used in dumps and error messages.
  const char* GetName() const {
    return name_;
  }

  // Cheap RTTI replacement; each concrete subclass overrides exactly one of these.
  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  // Dumps state of this lock to |os|; implemented by each subclass.
  virtual void Dump(std::ostream& os) const = 0;

  // Dumps all registered mutexes (used for SIGQUIT diagnostics).
  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  // Bookkeeping hooks invoked on acquisition/release; used for hierarchy checking.
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  // Checks that waiting on a condition variable guarded by this mutex cannot deadlock.
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  // Records one contention event (blocked thread, owner, wait duration in ns).
  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  // Zero-length when kLogLockContentions is false (kContentionLogDataSize == 0), so the log
  // costs nothing in release builds. Only dereferenced behind a kLogLockContentions check.
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  // Returns whether this mutex has ever been contended; always false when contention
  // logging is compiled out (the array dereference is guarded by the constant check).
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
200
201// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
202// exclusive access to what it guards. A Mutex can be in one of two states:
203// - Free - not owned by any thread,
204// - Exclusive - owned by a single thread.
205//
206// The effect of locking and unlocking operations on the state is:
207// State     | ExclusiveLock | ExclusiveUnlock
208// -------------------------------------------
209// Free      | Exclusive     | error
210// Exclusive | Block*        | Free
211// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
212//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
// Output operator for diagnostics.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  // |recursive| permits the owning thread to re-acquire the lock (tracked via
  // recursion_count_); the default is a non-reentrant mutex.
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  // Alias for ExclusiveLock.
  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  // Alias for ExclusiveTryLock.
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  // Alias for ExclusiveUnlock.
  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  // No-op unless kDebugLocking; suppressed while aborting to avoid recursive failures.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  // Fallback implementation on platforms without futexes (e.g. Darwin).
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
285
286// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
287// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
288// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
289// condition variable. A ReaderWriterMutex can be in one of three states:
290// - Free - not owned by any thread,
291// - Exclusive - owned by a single thread,
292// - Shared(n) - shared amongst n threads.
293//
294// The effect of locking and unlocking operations on the state is:
295//
296// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
297// ----------------------------------------------------------------------------
298// Free      | Exclusive     | error           | SharedLock(1)    | error
299// Exclusive | Block         | Free            | Block            | error
300// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
301// * for large values of n the SharedLock may block.
302std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
303class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
304 public:
305  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
306  ~ReaderWriterMutex();
307
308  virtual bool IsReaderWriterMutex() const { return true; }
309
310  // Block until ReaderWriterMutex is free then acquire exclusive access.
311  void ExclusiveLock(Thread* self) ACQUIRE();
312  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }
313
314  // Release exclusive access.
315  void ExclusiveUnlock(Thread* self) RELEASE();
316  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }
317
318  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
319  // or false if timeout is reached.
320#if HAVE_TIMED_RWLOCK
321  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
322      EXCLUSIVE_TRYLOCK_FUNCTION(true);
323#endif
324
325  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
326  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
327  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }
328
329  // Try to acquire share of ReaderWriterMutex.
330  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);
331
332  // Release a share of the access.
333  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
334  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }
335
336  // Is the current thread the exclusive holder of the ReaderWriterMutex.
337  bool IsExclusiveHeld(const Thread* self) const;
338
339  // Assert the current thread has exclusive access to the ReaderWriterMutex.
340  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
341    if (kDebugLocking && (gAborting == 0)) {
342      CHECK(IsExclusiveHeld(self)) << *this;
343    }
344  }
345  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
346
347  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
348  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
349    if (kDebugLocking && (gAborting == 0)) {
350      CHECK(!IsExclusiveHeld(self)) << *this;
351    }
352  }
353  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
354    AssertNotExclusiveHeld(self);
355  }
356
357  // Is the current thread a shared holder of the ReaderWriterMutex.
358  bool IsSharedHeld(const Thread* self) const;
359
360  // Assert the current thread has shared access to the ReaderWriterMutex.
361  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
362    if (kDebugLocking && (gAborting == 0)) {
363      // TODO: we can only assert this well when self != null.
364      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
365    }
366  }
367  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
368    AssertSharedHeld(self);
369  }
370
371  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
372  // mode.
373  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
374    if (kDebugLocking && (gAborting == 0)) {
375      CHECK(!IsSharedHeld(self)) << *this;
376    }
377  }
378
379  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
380  // than the owner.
381  uint64_t GetExclusiveOwnerTid() const;
382
383  virtual void Dump(std::ostream& os) const;
384
385  // For negative capabilities in clang annotations.
386  const ReaderWriterMutex& operator!() const { return *this; }
387
388 private:
389#if ART_USE_FUTEXES
390  // Out-of-inline path for handling contention for a SharedLock.
391  void HandleSharedLockContention(Thread* self, int32_t cur_state);
392
393  // -1 implies held exclusive, +ve shared held by state_ many owners.
394  AtomicInteger state_;
395  // Exclusive owner. Modification guarded by this mutex.
396  volatile uint64_t exclusive_owner_;
397  // Number of contenders waiting for a reader share.
398  AtomicInteger num_pending_readers_;
399  // Number of contenders waiting to be the writer.
400  AtomicInteger num_pending_writers_;
401#else
402  pthread_rwlock_t rwlock_;
403  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
404#endif
405  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
406};
407
408// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
409// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
410// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
411// held by any mutator threads. However, a thread in the kRunnable state is considered to have
412// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
413// state have associated implications on lock ownership. Extra methods to handle the state
414// transitions have been added to the interface but are only accessible to the methods dealing
415// with state transitions. The thread state and flags attributes are used to ensure thread state
416// transitions are consistent with the permitted behaviour of the mutex.
417//
418// *) The most important consequence of this behaviour is that all threads must be in one of the
419// suspended states before exclusive ownership of the mutator mutex is sought.
420//
// Output operator for diagnostics.
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  // Only Thread may drive the state-transition entry points below; they model the
  // kRunnable <-> suspended transitions as shared lock/unlock operations (see the class
  // comment above).
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};
440
441// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
442// (Signal) or all at once (Broadcast).
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  // |mutex| is the guard that waiters must hold; it is stored by reference in guard_.
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  // Wake all waiters.
  void Broadcast(Thread* self);
  // Wake one waiter.
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come into to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  // Fallback implementation on platforms without futexes (e.g. Darwin).
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
478
479// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
480// upon destruction.
481class SCOPED_CAPABILITY MutexLock {
482 public:
483  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
484    mu_.ExclusiveLock(self_);
485  }
486
487  ~MutexLock() RELEASE() {
488    mu_.ExclusiveUnlock(self_);
489  }
490
491 private:
492  Thread* const self_;
493  Mutex& mu_;
494  DISALLOW_COPY_AND_ASSIGN(MutexLock);
495};
496// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
497#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
498
499// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
500// construction and releases it upon destruction.
501class SCOPED_CAPABILITY ReaderMutexLock {
502 public:
503  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
504      self_(self), mu_(mu) {
505    mu_.SharedLock(self_);
506  }
507
508  ~ReaderMutexLock() RELEASE() {
509    mu_.SharedUnlock(self_);
510  }
511
512 private:
513  Thread* const self_;
514  ReaderWriterMutex& mu_;
515  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
516};
517// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
518// "ReaderMutexLock mu(lock)".
519#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
520
521// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
522// construction and releases it upon destruction.
523class SCOPED_CAPABILITY WriterMutexLock {
524 public:
525  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
526      self_(self), mu_(mu) {
527    mu_.ExclusiveLock(self_);
528  }
529
530  ~WriterMutexLock() UNLOCK_FUNCTION() {
531    mu_.ExclusiveUnlock(self_);
532  }
533
534 private:
535  Thread* const self_;
536  ReaderWriterMutex& mu_;
537  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
538};
539// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
540// "WriterMutexLock mu(lock)".
541#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
542
543// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  // Acquire/Release are no-ops at runtime; they exist solely so that clang's thread-safety
  // analysis can track the "role" capability.
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  // For negative capabilities in clang annotations.
  const Role& operator!() const { return *this; }
};

// Role held while the thread may not become suspended (see Roles::uninterruptible_ below).
class Uninterruptible : public Role {
};
553
554// Global mutexes corresponding to the levels above.
// Holder for the runtime's global mutexes. Each ACQUIRED_AFTER annotation below mirrors the
// relative ordering of the corresponding LockLevel entries above; keep the two in sync.
class Locks {
 public:
  // Allocates and initializes all global mutexes; must run before any of them is used.
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards String initializer register map in interpreter.
  static Mutex* interpreter_string_init_map_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(interpreter_string_init_map_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  // Guards allocation of monitor ids.
  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_count_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(oat_file_count_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  // NOTE(review): both mem_maps_lock_ and logging_lock_ are annotated
  // ACQUIRED_AFTER(unexpected_signal_lock_), yet kLoggingLock sits below kMemMapsLock in
  // LockLevel — confirm whether this should be ACQUIRED_AFTER(mem_maps_lock_).
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda proxy classes.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_class_table_lock_ ACQUIRED_AFTER(lambda_table_lock_);
};
701
// Well-known global instances of the thread-safety "roles" declared above.
class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};
707
708}  // namespace art
709
710#endif  // ART_RUNTIME_BASE_MUTEX_H_
711