mutex.h revision 0866f4ed6338faa4a193b7e819fc7cd72bd7b0ae
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
// Use futex-based synchronization primitives everywhere except on Apple/Darwin
// platforms, which lack the futex syscall and fall back to pthread primitives.
#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif
43
44namespace art {
45
46class SHARED_LOCKABLE ReaderWriterMutex;
47class SHARED_LOCKABLE MutatorMutex;
48class ScopedContentionRecorder;
49class Thread;
50
51// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
52// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
53// partial ordering and thereby cause deadlock situations to fail checks.
54//
55// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  // Levels are ordered lowest first: a thread may only acquire a lock at a level strictly
  // higher than every lock it already holds, so locks near the top of this list (e.g.
  // kMutatorLock) must be taken before locks near the bottom (e.g. kLoggingLock).
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kLambdaTableLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kJniWeakGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kJitCodeCacheLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kOatFileCountLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
// Pretty-printer for LockLevel values, used in error messages and dumps.
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
122
// Enables lock-hierarchy and lock-holder sanity checks (CHECKs in the Assert* methods
// below) in debug builds only.
const bool kDebugLocking = kIsDebugBuild;
124
// Record Log contention information, dumpable via SIGQUIT.
// BUG FIX: this was "#ifdef ART_USE_FUTEXES", which is always true because the macro is
// unconditionally defined above (to 0 or 1) — making the #else branch dead. "#if" tests
// the macro's value, which is what was intended.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futex.
const bool kLogLockContentions = false;
#endif
// Number of most-recent contention log entries kept per mutex.
const size_t kContentionLogSize = 4;
// Both sizes collapse to 0 when logging is disabled so the arrays occupy no space.
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
137
// Base class for all Mutex implementations
class BaseMutex {
 public:
  // Human-readable name, used in dumps and error reporting.
  const char* GetName() const {
    return name_;
  }

  // Cheap RTTI-style queries used in place of dynamic_cast.
  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  // Dump the state of this mutex to os.
  virtual void Dump(std::ostream& os) const = 0;

  // Dump the state of all registered mutexes to os.
  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  // Lock-hierarchy bookkeeping: record that self now holds / no longer holds this lock.
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  // Sanity check (debug builds) that it is permissible for self to block on this mutex.
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  // Record that blocked_tid waited nano_time_blocked ns on this mutex while owner_tid held it.
  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  // Size is 0 when kLogLockContentions is false, so this occupies no space in that
  // configuration; only accessed under "if (kLogLockContentions)" guards.
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  // Whether this mutex has ever been contended (always false when logging is disabled).
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
199
200// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
201// exclusive access to what it guards. A Mutex can be in one of two states:
202// - Free - not owned by any thread,
203// - Exclusive - owned by a single thread.
204//
205// The effect of locking and unlocking operations on the state is:
206// State     | ExclusiveLock | ExclusiveUnlock
207// -------------------------------------------
208// Free      | Exclusive     | error
209// Exclusive | Block*        | Free
210// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
211//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  // recursive == true permits the owning thread to re-lock; see class comment above on why
  // the default is non-recursive.
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    // Checks are skipped while aborting to avoid recursive failures during shutdown.
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
284
285// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
286// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
287// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
288// condition variable. A ReaderWriterMutex can be in one of three states:
289// - Free - not owned by any thread,
290// - Exclusive - owned by a single thread,
291// - Shared(n) - shared amongst n threads.
292//
293// The effect of locking and unlocking operations on the state is:
294//
295// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
296// ----------------------------------------------------------------------------
297// Free      | Exclusive     | error           | SharedLock(1)    | error
298// Exclusive | Block         | Free            | Block            | error
299// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
300// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire share of ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    // Checks are skipped while aborting to avoid recursive failures during shutdown.
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    // IsSharedHeld() also covers exclusive holders here, hence a single check suffices.
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
406
407// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
408// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
409// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
410// held by any mutator threads. However, a thread in the kRunnable state is considered to have
411// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
412// state have associated implications on lock ownership. Extra methods to handle the state
413// transitions have been added to the interface but are only accessible to the methods dealing
414// with state transitions. The thread state and flags attributes are used to ensure thread state
415// transitions are consistent with the permitted behaviour of the mutex.
416//
417// *) The most important consequence of this behaviour is that all threads must be in one of the
418// suspended states before exclusive ownership of the mutator mutex is sought.
419//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  // Only Thread performs the state transitions described in the class comment above:
  // leaving kRunnable gives up the implied shared hold, entering kRunnable acquires it.
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};
439
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  // mutex is the guard associated with this condition variable; see guard_ below.
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come into to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
477
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_CAPABILITY MutexLock {
 public:
  // Blocks until exclusive access to mu is acquired.
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  // Releases the hold taken by the constructor.
  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
497
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  // Blocks until a shared (reader) hold on mu is acquired.
  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  // Releases the shared hold taken by the constructor.
  ~ReaderMutexLock() RELEASE() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
519
520// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
521// construction and releases it upon destruction.
522class SCOPED_CAPABILITY WriterMutexLock {
523 public:
524  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
525      self_(self), mu_(mu) {
526    mu_.ExclusiveLock(self_);
527  }
528
529  ~WriterMutexLock() UNLOCK_FUNCTION() {
530    mu_.ExclusiveUnlock(self_);
531  }
532
533 private:
534  Thread* const self_;
535  ReaderWriterMutex& mu_;
536  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
537};
538// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
539// "WriterMutexLock mu(lock)".
540#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
541
// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  // Bodies are intentionally empty: the methods exist only so clang's thread-safety
  // analysis can track acquisition/release of the role as a capability.
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  // For negative capabilities in clang annotations.
  const Role& operator!() const { return *this; }
};
549
// Role capability held by a thread that may not become suspended (see Roles below).
class Uninterruptible : public Role {
};
552
// Global mutexes corresponding to the levels above.
class Locks {
 public:
  // Allocates and initializes all of the global locks below; must run before they are used.
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards String initializer register map in interpreter.
  static Mutex* interpreter_string_init_map_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(interpreter_string_init_map_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  // Guards the allocation/deallocation of monitor ids.
  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards the count of opened oat files in OatFileManager (kOatFileCountLock level).
  static ReaderWriterMutex* oat_file_count_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(oat_file_count_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  // NOTE(review): both this and mem_maps_lock_ are annotated ACQUIRED_AFTER the same
  // unexpected_signal_lock_, while the LockLevel enum places kLoggingLock below
  // kMemMapsLock — confirm whether this should chain after mem_maps_lock_ instead.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
696
// Global instances of the Role capabilities declared above.
class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};
702
703}  // namespace art
704
705#endif  // ART_RUNTIME_BASE_MUTEX_H_
706