mutex.h revision b486a98aadc95d80548953410cf23edba62259fa
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class SHARED_LOCKABLE ReaderWriterMutex;
class SHARED_LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;
// LockLevel is used to impose a lock hierarchy [1] in which acquiring a Mutex at a level higher
// than or equal to one the thread already holds is invalid. The lock hierarchy achieves a
// cycle-free partial ordering and thereby causes would-be deadlocks to fail debug checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kMarkSweepMarkStackLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kTaggingLockLevel,
  kTransactionLogLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDeoptimizedMethodsLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO rename.
  kJitCodeCacheLock,
  kCHALock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
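
// Example of the hierarchy in action (an illustrative sketch, not part of this header's API;
// the lock names are hypothetical and the types are declared later in this file). A thread
// holding a mutex at kThreadListLock may subsequently acquire only mutexes at strictly lower
// levels; reversing the order below would trip the hierarchy check in debug builds:
//
//   Mutex list_lock("list lock", kThreadListLock);
//   Mutex count_lock("count lock", kThreadSuspendCountLock);
//
//   void Example(Thread* self) {
//     list_lock.ExclusiveLock(self);    // Held at level kThreadListLock.
//     count_lock.ExclusiveLock(self);   // OK: kThreadSuspendCountLock < kThreadListLock.
//     count_lock.ExclusiveUnlock(self);
//     list_lock.ExclusiveUnlock(self);
//   }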

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information; dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false, as lock contention logging is supported only with futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

  bool ShouldRespondToEmptyCheckpointRequest() const {
    return should_respond_to_empty_checkpoint_request_;
  }

  void SetShouldRespondToEmptyCheckpointRequest(bool value) {
    should_respond_to_empty_checkpoint_request_ = value;
  }

  virtual void WakeupToRespondToEmptyCheckpoint() = 0;

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;
  bool should_respond_to_empty_checkpoint_request_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock by the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until the mutex is free, then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Returns true if it acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex?
  ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
  ALWAYS_INLINE void AssertHeld(const Thread* self) const ASSERT_CAPABILITY(this);

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is usually better to use
  // AssertHeld/AssertNotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
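
// A minimal usage sketch (illustrative only; the names below are hypothetical, not part of this
// header). Prefer the scoped MutexLock helper declared later in this file over manual
// lock/unlock pairs:
//
//   Mutex stats_lock("stats lock", kDefaultMutexLevel);
//   size_t allocation_count GUARDED_BY(stats_lock) = 0;
//
//   void RecordAllocation(Thread* self) {
//     MutexLock mu(self, stats_lock);  // Acquired here...
//     ++allocation_count;
//   }                                  // ...and released when mu goes out of scope.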

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. One limitation compared to a Mutex is that it cannot be used with a
// ConditionVariable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until the ReaderWriterMutex is free, then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Block until the ReaderWriterMutex is free and acquire exclusive access. Returns true on
  // success or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until the ReaderWriterMutex is shared or free, then acquire a share on the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex?
  ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
  ALWAYS_INLINE void AssertWriterHeld(const Thread* self) const ASSERT_CAPABILITY(this);

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex?
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  ALWAYS_INLINE void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  ALWAYS_INLINE void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  ALWAYS_INLINE void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 means held exclusively; a positive value means held shared by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
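
// A minimal usage sketch (illustrative only; the names below are hypothetical). The scoped
// ReaderMutexLock/WriterMutexLock helpers declared later in this file are normally preferred
// over the raw SharedLock/ExclusiveLock calls:
//
//   ReaderWriterMutex table_lock("table lock", kDefaultMutexLevel);
//
//   void Lookup(Thread* self) {
//     ReaderMutexLock mu(self, table_lock);  // Shared: many readers may hold this at once.
//     // ... read the guarded table ...
//   }
//
//   void Update(Thread* self) {
//     WriterMutexLock mu(self, table_lock);  // Exclusive: blocks readers and other writers.
//     // ... mutate the guarded table ...
//   }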

// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are accessible only to the code performing
// those transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};
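
// A sketch of the ownership model (illustrative only): a mutator thread never calls
// SharedLock/SharedUnlock on the mutator lock directly; its state transitions imply them.
// Roughly:
//
//   Entering kRunnable behaves as if the thread had done
//     Locks::mutator_lock_->SharedLock(self);    // via TransitionFromSuspendedToRunnable
//   and leaving kRunnable behaves as if it had done
//     Locks::mutator_lock_->SharedUnlock(self);  // via TransitionFromRunnableToSuspended
//
// Hence a GC or debugger thread that wants ExclusiveLock on the mutator lock must first get
// every mutator thread out of kRunnable, i.e. suspended.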

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiter list on the futex,
  // as waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
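
// A minimal usage sketch (illustrative only; names are hypothetical). As with pthreads, the
// predicate must be re-checked in a loop because Wait may wake spuriously and the guard is
// released while blocked:
//
//   Mutex queue_lock("queue lock");
//   ConditionVariable queue_cond("queue non-empty condition", queue_lock);
//   bool queue_non_empty GUARDED_BY(queue_lock) = false;
//
//   void Consume(Thread* self) {
//     MutexLock mu(self, queue_lock);
//     while (!queue_non_empty) {
//       queue_cond.Wait(self);  // Releases queue_lock while blocked, reacquires before return.
//     }
//     // ... pop from the guarded queue ...
//   }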

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_CAPABILITY MutexLock {
 public:
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  ALWAYS_INLINE ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu);

  ALWAYS_INLINE ~ReaderMutexLock() RELEASE();

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY WriterMutexLock {
 public:
  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")

// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  const Role& operator!() const { return *this; }
};

class Uninterruptible : public Role {
};
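
// A sketch of how a role capability interacts with annotalysis (illustrative only; the function
// signatures below are hypothetical, not declared in this header). Acquire/Release are no-ops at
// runtime; only the static checker sees the capability:
//
//   void StartNoThreadSuspension() ACQUIRE(Roles::uninterruptible_);
//   void EndNoThreadSuspension() RELEASE(Roles::uninterruptible_);
//
//   // Callable only between the two calls above, as checked at compile time.
//   void TouchRawObjects() REQUIRES(Roles::uninterruptible_);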

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.

  // Destroying various lock types can emit errors that vary depending upon
  // whether the client (art::Runtime) is currently active.  Allow the client
  // to set a callback that is used to check when it is acceptable to call
  // Abort.  The default behavior is that the client *is not* able to call
  // Abort if no callback is established.
  using ClientCallback = bool();
  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
  // Checks for whether it is safe to call Abort() without using locks.
  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;

  // Add a mutex to expected_mutexes_on_weak_ref_access_.
  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will get their barrier passed
  // by the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up when all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards Class Hierarchy Analysis (CHA).
  static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(cha_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guards the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);

  // Guards extra string entries for VerifierDeps.
  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);

  // Guards the intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards the reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards the cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards the weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards the finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards the phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards the soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Guards accesses to the JNI Global Reference table.
  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Guards accesses to the JNI Weak Global Reference table.
  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);

  // Guards accesses to the JNI function table override.
  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);

  // Ensures there is at most one aborting thread at a time.
  static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // Allows handling of at most one unexpected signal at a time.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Ensures there is at most one exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
  // encounter an unexpected mutex on accessing weak refs,
  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
  class ScopedExpectedMutexesOnWeakRefAccessLock;
};
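
// A sketch of how the ACQUIRED_AFTER chain above is meant to be used (illustrative only): locks
// must be taken in the declared order, higher-level locks first. For example, code holding
// thread_list_lock_ may then take thread_suspend_count_lock_, but not the other way around:
//
//   void SuspendAllExample(Thread* self) {
//     MutexLock list_mu(self, *Locks::thread_list_lock_);            // Higher level first.
//     MutexLock count_mu(self, *Locks::thread_suspend_count_lock_);  // Lower level second.
//     // ... adjust Thread::suspend_count_ for each thread in the list ...
//   }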

class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_