// mutex.h revision c8089540ccf0f1c43d8db3828f21d489b28a4013
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
31#if defined(__APPLE__)
32#define ART_USE_FUTEXES 0
33#else
34#define ART_USE_FUTEXES 1
35#endif
36
37// Currently Darwin doesn't support locks with timeouts.
38#if !defined(__APPLE__)
39#define HAVE_TIMED_RWLOCK 1
40#else
41#define HAVE_TIMED_RWLOCK 0
42#endif
43
44namespace art {
45
46class SHARED_LOCKABLE ReaderWriterMutex;
47class SHARED_LOCKABLE MutatorMutex;
48class ScopedContentionRecorder;
49class Thread;
50
// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
// partial ordering and thereby cause deadlock situations to fail checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
//
// Levels are listed from the lowest (acquired last, held innermost) to the
// highest (acquired first). The relative order of the enumerators is
// load-bearing: the hierarchy checks compare these values, so inserting or
// reordering entries changes which acquisition orders are legal.
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDeoptimizedMethodsLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO rename.
  kJitCodeCacheLock,
  kCHALock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
// Pretty-printer for LockLevel; defined in mutex.cc.
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
125
126const bool kDebugLocking = kIsDebugBuild;
127
128// Record Log contention information, dumpable via SIGQUIT.
129#ifdef ART_USE_FUTEXES
130// To enable lock contention logging, set this to true.
131const bool kLogLockContentions = false;
132#else
133// Keep this false as lock contention logging is supported only with
134// futex.
135const bool kLogLockContentions = false;
136#endif
137const size_t kContentionLogSize = 4;
138const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
139const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
140
// Base class for all Mutex implementations
class BaseMutex {
 public:
  // Human-readable name given at construction. The pointer is stored, not
  // copied, so the string must outlive the mutex.
  const char* GetName() const {
    return name_;
  }

  // Cheap RTTI-style queries; overridden by the concrete subclasses.
  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  // Dump state of all registered mutexes (definition in mutex.cc).
  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  // Lock-hierarchy bookkeeping: record that |self| acquired/released this lock.
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  // Check that it is safe for |self| to block while holding this lock.
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  // Record that |blocked_tid| waited |nano_time_blocked| ns for this lock while
  // |owner_tid| held it. Only meaningful when kLogLockContentions is enabled.
  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  // Zero-length when contention logging is disabled (kContentionLogDataSize is
  // 0); every access is guarded by a kLogLockContentions check.
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  // Whether this mutex has ever been contended. Always false when contention
  // logging is compiled out.
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
202
// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  // |recursive| permits re-entrant ExclusiveLock by the owner; the depth is
  // tracked in recursion_count_.
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  // No-op unless kDebugLocking, and suppressed while aborting.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  // Lock depth when recursive_; meaningful only while held.
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
287
// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire share of ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  // All asserts below are no-ops unless kDebugLocking, and suppressed while
  // aborting.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
409
// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the methods dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  // Only Thread may perform the implicit shared lock/unlock that accompanies
  // its state transitions; definitions live with the Thread implementation.
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};
442
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  // |mutex| is the guard that waiters must hold; it must outlive this object.
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  // Wait with a timeout of |ms| milliseconds plus |ns| nanoseconds.
  // NOTE(review): the meaning of the bool return is not visible here — confirm
  // against the definition in mutex.cc before relying on it.
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come into to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
480
481// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
482// upon destruction.
483class SCOPED_CAPABILITY MutexLock {
484 public:
485  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
486    mu_.ExclusiveLock(self_);
487  }
488
489  ~MutexLock() RELEASE() {
490    mu_.ExclusiveUnlock(self_);
491  }
492
493 private:
494  Thread* const self_;
495  Mutex& mu_;
496  DISALLOW_COPY_AND_ASSIGN(MutexLock);
497};
498// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
499#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
500
501// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
502// construction and releases it upon destruction.
503class SCOPED_CAPABILITY ReaderMutexLock {
504 public:
505  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
506      self_(self), mu_(mu) {
507    mu_.SharedLock(self_);
508  }
509
510  ~ReaderMutexLock() RELEASE() {
511    mu_.SharedUnlock(self_);
512  }
513
514 private:
515  Thread* const self_;
516  ReaderWriterMutex& mu_;
517  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
518};
519// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
520// "ReaderMutexLock mu(lock)".
521#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
522
523// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
524// construction and releases it upon destruction.
525class SCOPED_CAPABILITY WriterMutexLock {
526 public:
527  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
528      self_(self), mu_(mu) {
529    mu_.ExclusiveLock(self_);
530  }
531
532  ~WriterMutexLock() UNLOCK_FUNCTION() {
533    mu_.ExclusiveUnlock(self_);
534  }
535
536 private:
537  Thread* const self_;
538  ReaderWriterMutex& mu_;
539  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
540};
541// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
542// "WriterMutexLock mu(lock)".
543#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
544
// For StartNoThreadSuspension and EndNoThreadSuspension.
// A "role" is a pseudo-capability used purely for clang thread-safety
// analysis: Acquire/Release have empty bodies and exist only so annotalysis
// can track the role being held.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  // For negative capabilities in clang annotations.
  const Role& operator!() const { return *this; }
};
552
// Role held while thread suspension is disallowed (see Roles::uninterruptible_).
class Uninterruptible : public Role {
};
555
// Global mutexes corresponding to the levels above.
// Holder of the runtime-wide lock singletons. Each member is annotated with
// ACQUIRED_AFTER to encode the acquisition order enforced by annotalysis;
// the order mirrors the LockLevel enum.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.

  // Destroying various lock types can emit errors that vary depending upon
  // whether the client (art::Runtime) is currently active.  Allow the client
  // to set a callback that is used to check when it is acceptable to call
  // Abort.  The default behavior is that the client *is not* able to call
  // Abort if no callback is established.
  using ClientCallback = bool();
  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
  // Checks for whether it is safe to call Abort() without using locks.
  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;


  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards Class Hierarchy Analysis (CHA).
  static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(cha_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);

  // Guards extra string entries for VerifierDeps.
  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Guard accesses to the JNI Global Reference table.
  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Guard accesses to the JNI Weak Global Reference table.
  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);

  // Guard accesses to the JNI function table override.
  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  // NOTE(review): both mem_maps_lock_ and logging_lock_ are annotated
  // ACQUIRED_AFTER(unexpected_signal_lock_), yet kLoggingLock sits below
  // kMemMapsLock in LockLevel — confirm whether this should be
  // ACQUIRED_AFTER(mem_maps_lock_).
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};
721
// Holder for the global Role singletons (cf. Locks for the lock singletons).
class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};
727
728}  // namespace art
729
730#endif  // ART_RUNTIME_BASE_MUTEX_H_
731