/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class SHARED_LOCKABLE ReaderWriterMutex;
class SHARED_LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1]: acquiring a Mutex at a level higher than or
// equal to that of any lock the thread already holds is invalid. The lock hierarchy imposes a
// cycle-free partial ordering on locks and thereby causes deadlock-prone acquisition patterns to
// fail their checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDeoptimizedMethodsLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO rename.
  kJitCodeCacheLock,
  kCHALock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
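
// For example (an illustrative sketch, not code from the runtime; |self| is assumed to be the
// current Thread*): holding a higher-level lock while acquiring a lower-level one is fine, but
// the reverse order fails the debug checks:
//
//   Mutex thread_list("thread list lock", kThreadListLock);
//   Mutex logging("logging lock", kLoggingLock);
//   thread_list.ExclusiveLock(self);
//   logging.ExclusiveLock(self);        // OK: kLoggingLock < kThreadListLock.
//   logging.ExclusiveUnlock(self);
//   thread_list.ExclusiveUnlock(self);
//   // Acquiring in the opposite order would CHECK-fail in debug builds instead of deadlocking.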

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

  bool ShouldRespondToEmptyCheckpointRequest() const {
    return should_respond_to_empty_checkpoint_request_;
  }

  void SetShouldRespondToEmptyCheckpointRequest(bool value) {
    should_respond_to_empty_checkpoint_request_ = value;
  }

  virtual void WakeupToRespondToEmptyCheckpoint() = 0;

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;
  bool should_respond_to_empty_checkpoint_request_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies waiting on ConditionVariables.
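//
// A minimal usage sketch (illustrative only; |self| is assumed to be the current Thread*):
//
//   Mutex lock("demo lock");
//   lock.ExclusiveLock(self);    // Blocks until the Mutex is Free, then holds it exclusively.
//   ...                          // Access the guarded state.
//   lock.ExclusiveUnlock(self);  // Returns the Mutex to the Free state.
//
// Prefer the scoped MutexLock helper declared later in this file, which releases the lock even on
// early return.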
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until the mutex is free, then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Returns true if exclusive access was acquired, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex?
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with the exclusive owner. No memory ordering semantics if called from a thread
  // other than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A drawback in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
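//
// A usage sketch (illustrative only; |self| is assumed to be the current Thread*):
//
//   ReaderWriterMutex rw_lock("demo rw lock");
//   rw_lock.SharedLock(self);       // Many threads may hold a share concurrently.
//   ...                             // Read the guarded state.
//   rw_lock.SharedUnlock(self);
//   rw_lock.ExclusiveLock(self);    // Blocks until all shares have been released.
//   ...                             // Mutate the guarded state.
//   rw_lock.ExclusiveUnlock(self);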
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until the ReaderWriterMutex is free, then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Block until the ReaderWriterMutex is free and acquire exclusive access. Returns true on
  // success or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until the ReaderWriterMutex is shared or free, then acquire a share of the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex?
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex?
  bool IsSharedHeld(const Thread* self) const;

  // Assert that the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert that the current thread doesn't hold this ReaderWriterMutex either in shared or
  // exclusive mode.
  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with the exclusive owner. No memory ordering semantics if called from a thread
  // other than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive; positive values count the shared owners (state_ many).
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the methods dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
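// For example, the runtime's usual pattern looks like this sketch (annotation only; the
// transitions below are performed via Thread state changes rather than explicit lock calls):
//
//   void TouchHeapObjects() REQUIRES_SHARED(Locks::mutator_lock_);  // Caller must be kRunnable.
//
// A GC or debugger thread that wants exclusive ownership must first suspend all mutator threads,
// since every thread in kRunnable counts as a shared holder.
//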
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // its Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
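
// Typical usage (a sketch; |self| is assumed to be the current Thread* and |ready| stands for
// arbitrary state guarded by the lock):
//
//   Mutex lock("cv demo lock");
//   ConditionVariable cv("cv demo", lock);
//   ...
//   lock.ExclusiveLock(self);
//   while (!ready) {
//     cv.Wait(self);  // Releases lock while waiting, reacquires it before returning.
//   }
//   lock.ExclusiveUnlock(self);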

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_CAPABILITY MutexLock {
 public:
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
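
// Example (a sketch; |self| is assumed to be the current Thread*):
//
//   {
//     MutexLock mu(self, *Locks::thread_list_lock_);  // Exclusively held until end of scope.
//     ...                                             // Access state guarded by the lock.
//   }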

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() RELEASE() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY WriterMutexLock {
 public:
  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
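
// Example (a sketch; |self| is assumed to be the current Thread*):
//
//   {
//     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);  // Shared access in this scope.
//     ...
//   }
//   {
//     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);  // Exclusive access in this scope.
//     ...
//   }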

// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  const Role& operator!() const { return *this; }
};

class Uninterruptible : public Role {
};
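
// Example annotalysis use (a sketch, assuming the REQUIRES macro from the runtime's thread-safety
// annotations and the Roles holder declared below):
//
//   void DoWorkThatMustNotSuspend() REQUIRES(Roles::uninterruptible_);
//
// Callers must have acquired the role, e.g. via helpers elsewhere in the runtime, before invoking
// such a function.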

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.

  // Destroying various lock types can emit errors that vary depending upon
  // whether the client (art::Runtime) is currently active.  Allow the client
  // to set a callback that is used to check when it is acceptable to call
  // Abort.  The default behavior is that the client *is not* able to call
  // Abort if no callback is established.
  using ClientCallback = bool();
  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
  // Checks for whether it is safe to call Abort() without using locks.
  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will get their barrier passed
  // by the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up when all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards Class Hierarchy Analysis (CHA).
  static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(cha_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
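
  // For example (an illustrative sketch; |my_lock_| is a hypothetical member, initialized with a
  // name in the owning class's constructor init-list):
  //
  //   Mutex my_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;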

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guards the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);

  // Guards extra string entries for VerifierDeps.
  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);

  // Guards the intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards the reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards the cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards the weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards the finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards the phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards the soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Guards accesses to the JNI Global Reference table.
  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Guards accesses to the JNI Weak Global Reference table.
  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);

  // Guards accesses to the JNI function table override.
  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
  // encounter an unexpected mutex on accessing weak refs,
  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
};

class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_