// art/runtime/base/mutex.h at revision f9c6fc610b27887f832e453a0da1789187293408
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
31#if defined(__APPLE__)
32#define ART_USE_FUTEXES 0
33#else
34#define ART_USE_FUTEXES 1
35#endif
36
37// Currently Darwin doesn't support locks with timeouts.
38#if !defined(__APPLE__)
39#define HAVE_TIMED_RWLOCK 1
40#else
41#define HAVE_TIMED_RWLOCK 0
42#endif
43
44namespace art {
45
46class SHARED_LOCKABLE ReaderWriterMutex;
47class SHARED_LOCKABLE MutatorMutex;
48class ScopedContentionRecorder;
49class Thread;
50
// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
// partial ordering and thereby cause deadlock situations to fail checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
//
// Levels are ordered from the lowest (acquired last, i.e. innermost) to the highest
// (acquired first, i.e. outermost). When adding a lock, insert its level at the
// position matching where it may be acquired relative to the locks already listed.
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kLambdaTableLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kTransactionLogLock,
  kMarkSweepMarkStackLock,
  kJniWeakGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kInterpreterStringInitMapLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
121
122const bool kDebugLocking = kIsDebugBuild;
123
124// Record Log contention information, dumpable via SIGQUIT.
125#ifdef ART_USE_FUTEXES
126// To enable lock contention logging, set this to true.
127const bool kLogLockContentions = false;
128#else
129// Keep this false as lock contention logging is supported only with
130// futex.
131const bool kLogLockContentions = false;
132#endif
133const size_t kContentionLogSize = 4;
134const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
135const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
136
// Base class for all Mutex implementations. Carries the name and lock-hierarchy
// level, plus the (optionally compiled-in) contention log dumpable via SIGQUIT.
class BaseMutex {
 public:
  // Human-readable name used in Dump() output and check-failure messages.
  const char* GetName() const {
    return name_;
  }

  // Poor-man's RTTI so callers can identify the concrete kind without dynamic_cast.
  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  // Dumps the state of all registered mutexes.
  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  // Bookkeeping hooks for the per-thread held-locks list / hierarchy checking.
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  // Records that |blocked_tid| spent |nano_time_blocked| ns blocked on |owner_tid|.
  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  // Zero-length when kLogLockContentions is false, so release builds pay no space
  // cost (zero-length arrays are a compiler extension, not standard C++).
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    // Guarded by kLogLockContentions: when logging is disabled the array is
    // zero-length and must not be dereferenced.
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
198
// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
// NOTE(review): no forward declaration of Mutex is visible in this file before this
// use; presumably one is provided by an included header — confirm.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    // Checks are skipped while aborting (gAborting != 0) to avoid recursive failures.
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
283
284// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
285// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
286// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
287// condition variable. A ReaderWriterMutex can be in one of three states:
288// - Free - not owned by any thread,
289// - Exclusive - owned by a single thread,
290// - Shared(n) - shared amongst n threads.
291//
292// The effect of locking and unlocking operations on the state is:
293//
294// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
295// ----------------------------------------------------------------------------
296// Free      | Exclusive     | error           | SharedLock(1)    | error
297// Exclusive | Block         | Free            | Block            | error
298// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
299// * for large values of n the SharedLock may block.
300std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
301class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
302 public:
303  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
304  ~ReaderWriterMutex();
305
306  virtual bool IsReaderWriterMutex() const { return true; }
307
308  // Block until ReaderWriterMutex is free then acquire exclusive access.
309  void ExclusiveLock(Thread* self) ACQUIRE();
310  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }
311
312  // Release exclusive access.
313  void ExclusiveUnlock(Thread* self) RELEASE();
314  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }
315
316  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
317  // or false if timeout is reached.
318#if HAVE_TIMED_RWLOCK
319  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
320      EXCLUSIVE_TRYLOCK_FUNCTION(true);
321#endif
322
323  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
324  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
325  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }
326
327  // Try to acquire share of ReaderWriterMutex.
328  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);
329
330  // Release a share of the access.
331  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
332  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }
333
334  // Is the current thread the exclusive holder of the ReaderWriterMutex.
335  bool IsExclusiveHeld(const Thread* self) const;
336
337  // Assert the current thread has exclusive access to the ReaderWriterMutex.
338  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
339    if (kDebugLocking && (gAborting == 0)) {
340      CHECK(IsExclusiveHeld(self)) << *this;
341    }
342  }
343  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
344
345  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
346  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
347    if (kDebugLocking && (gAborting == 0)) {
348      CHECK(!IsExclusiveHeld(self)) << *this;
349    }
350  }
351  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
352    AssertNotExclusiveHeld(self);
353  }
354
355  // Is the current thread a shared holder of the ReaderWriterMutex.
356  bool IsSharedHeld(const Thread* self) const;
357
358  // Assert the current thread has shared access to the ReaderWriterMutex.
359  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
360    if (kDebugLocking && (gAborting == 0)) {
361      // TODO: we can only assert this well when self != null.
362      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
363    }
364  }
365  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
366    AssertSharedHeld(self);
367  }
368
369  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
370  // mode.
371  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
372    if (kDebugLocking && (gAborting == 0)) {
373      CHECK(!IsSharedHeld(self)) << *this;
374    }
375  }
376
377  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
378  // than the owner.
379  uint64_t GetExclusiveOwnerTid() const;
380
381  virtual void Dump(std::ostream& os) const;
382
383  // For negative capabilities in clang annotations.
384  const ReaderWriterMutex& operator!() const { return *this; }
385
386 private:
387#if ART_USE_FUTEXES
388  // Out-of-inline path for handling contention for a SharedLock.
389  void HandleSharedLockContention(Thread* self, int32_t cur_state);
390
391  // -1 implies held exclusive, +ve shared held by state_ many owners.
392  AtomicInteger state_;
393  // Exclusive owner. Modification guarded by this mutex.
394  volatile uint64_t exclusive_owner_;
395  // Number of contenders waiting for a reader share.
396  AtomicInteger num_pending_readers_;
397  // Number of contenders waiting to be the writer.
398  AtomicInteger num_pending_writers_;
399#else
400  pthread_rwlock_t rwlock_;
401  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
402#endif
403  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
404};
405
// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the methods dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  // Only Thread may drive the state-transition entry points below.
  friend class Thread;
  // Logical unlock/shared-lock performed as part of the kRunnable state transitions.
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};
438
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  // Wake all / one waiter. NOTE(review): the sequence_ comment below implies the
  // caller holds guard_ when signalling — confirm against mutex.cc.
  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come into to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
476
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction (RAII).
class SCOPED_CAPABILITY MutexLock {
 public:
  // Blocks until exclusive access to |mu| is obtained.
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
496
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction (RAII).
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  // Blocks until a shared (reader) hold on |mu| is obtained.
  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() RELEASE() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
518
519// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
520// construction and releases it upon destruction.
521class SCOPED_CAPABILITY WriterMutexLock {
522 public:
523  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
524      self_(self), mu_(mu) {
525    mu_.ExclusiveLock(self_);
526  }
527
528  ~WriterMutexLock() UNLOCK_FUNCTION() {
529    mu_.ExclusiveUnlock(self_);
530  }
531
532 private:
533  Thread* const self_;
534  ReaderWriterMutex& mu_;
535  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
536};
537// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
538// "WriterMutexLock mu(lock)".
539#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
540
// For StartNoThreadSuspension and EndNoThreadSuspension.
// A Role is a pure annotalysis capability: the bodies are no-ops and exist only
// so the static analysis can track acquisition/release.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  // For negative capabilities in clang annotations.
  const Role& operator!() const { return *this; }
};
548
// Role held while a thread may not become suspended (see Roles::uninterruptible_).
class Uninterruptible : public Role {
};
551
// Global mutexes corresponding to the levels above. Each declaration's
// ACQUIRED_AFTER annotation documents the lock that must already be held
// (i.e. this lock is lower in the hierarchy than its predecessor).
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards String initializer register map in interpreter.
  static Mutex* interpreter_string_init_map_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(interpreter_string_init_map_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  // NOTE(review): per the LockLevel order (kLoggingLock < kMemMapsLock) one would
  // expect ACQUIRED_AFTER(mem_maps_lock_) here; confirm whether the weaker
  // annotation is intentional.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
692
// Global role singletons (see Role/Uninterruptible above).
class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};
698
699}  // namespace art
700
701#endif  // ART_RUNTIME_BASE_MUTEX_H_
702