// mutex.h revision 719d1a33f6569864f529e5a3fff59e7bca97aad0
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_BASE_MUTEX_H_
18#define ART_RUNTIME_BASE_MUTEX_H_
19
20#include <pthread.h>
21#include <stdint.h>
22
23#include <iosfwd>
24#include <string>
25
26#include "atomic.h"
27#include "base/logging.h"
28#include "base/macros.h"
29#include "globals.h"
30
31#if defined(__APPLE__)
32#define ART_USE_FUTEXES 0
33#else
34#define ART_USE_FUTEXES 1
35#endif
36
37// Currently Darwin doesn't support locks with timeouts.
38#if !defined(__APPLE__)
39#define HAVE_TIMED_RWLOCK 1
40#else
41#define HAVE_TIMED_RWLOCK 0
42#endif
43
44namespace art {
45
46class LOCKABLE ReaderWriterMutex;
47class ScopedContentionRecorder;
48class Thread;
49
// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
// partial ordering and thereby cause deadlock situations to fail checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
//
// Levels are listed in ascending order: the most deeply nested locks (e.g. kLoggingLock) come
// first and the coarsest (e.g. kMutatorLock) last. A thread may only acquire a lock whose level
// is strictly below the levels of the locks it already holds.
enum LockLevel {
  kLoggingLock = 0,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpSocketLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kInternTableLock,
  kMonitorPoolLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kLoadLibraryLock,
  kJdwpObjectRegistryLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kThreadListLock,
  kBreakpointInvokeLock,
  kDeoptimizationLock,
  kTraceLock,
  kProfilerLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kHeapBitmapLock,
  kMutatorLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
// Pretty-printer for LockLevel values (implemented elsewhere, presumably mutex.cc).
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
95
96const bool kDebugLocking = kIsDebugBuild;
97
98// Record Log contention information, dumpable via SIGQUIT.
99#ifdef ART_USE_FUTEXES
100// To enable lock contention logging, set this to true.
101const bool kLogLockContentions = false;
102#else
103// Keep this false as lock contention logging is supported only with
104// futex.
105const bool kLogLockContentions = false;
106#endif
107const size_t kContentionLogSize = 4;
108const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
109const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
110
// Base class for all Mutex implementations. Carries the name and lock-hierarchy level shared by
// Mutex and ReaderWriterMutex, plus the (optionally compiled-in) contention log.
class BaseMutex {
 public:
  // Returns the name supplied at construction.
  const char* GetName() const {
    return name_;
  }

  // Cheap run-time type discrimination; each subclass overrides the matching query to return true.
  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  // Dumps the state of this mutex to os; implemented by subclasses.
  virtual void Dump(std::ostream& os) const = 0;

  // Dumps the state of all mutexes.
  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  // Lock-hierarchy bookkeeping, presumably invoked by subclasses on acquisition/release.
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  // Checks (in debug builds) that it is safe for self to block on this mutex.
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  // Records a single contention event; DumpContention renders the accumulated log.
  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;  // Not owned; callers are expected to pass a long-lived string.

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    volatile uint64_t wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  // NOTE: "contetion" is a typo for "contention"; the name is kept as-is since the
  // implementation file refers to this member. The array is zero-length (and must never be
  // dereferenced) when kLogLockContentions is false.
  ContentionLogData contetion_log_data_[kContentionLogDataSize];

 public:
  // Whether any contention has ever been recorded on this mutex. Always false when contention
  // logging is compiled out; the array access is only reached when kLogLockContentions is true.
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contetion_log_data_->contention_count > 0;
    }
    return false;
  }
};
171
// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
// NOTE(review): unlike ReaderWriterMutex, Mutex is not forward-declared at the top of this
// header, so this declaration relies on the type being visible already — confirm an included
// header declares it.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  // recursive == true allows the owning thread to lock again; see recursive_ below.
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }

  // Returns true if acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  // Checked only in debug builds, and skipped while aborting (gAborting != 0) to avoid
  // cascading failures on the abort path.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with exclusive owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  volatile int32_t state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
249
250// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
251// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
252// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
253// condition variable. A ReaderWriterMutex can be in one of three states:
254// - Free - not owned by any thread,
255// - Exclusive - owned by a single thread,
256// - Shared(n) - shared amongst n threads.
257//
258// The effect of locking and unlocking operations on the state is:
259//
260// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
261// ----------------------------------------------------------------------------
262// Free      | Exclusive     | error           | SharedLock(1)    | error
263// Exclusive | Block         | Free            | Block            | error
264// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
265// * for large values of n the SharedLock may block.
266std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
267class LOCKABLE ReaderWriterMutex : public BaseMutex {
268 public:
269  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
270  ~ReaderWriterMutex();
271
272  virtual bool IsReaderWriterMutex() const { return true; }
273
274  // Block until ReaderWriterMutex is free then acquire exclusive access.
275  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
276  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }
277
278  // Release exclusive access.
279  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
280  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }
281
282  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
283  // or false if timeout is reached.
284#if HAVE_TIMED_RWLOCK
285  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
286      EXCLUSIVE_TRYLOCK_FUNCTION(true);
287#endif
288
289  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
290  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
291  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }
292
293  // Try to acquire share of ReaderWriterMutex.
294  bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
295
296  // Release a share of the access.
297  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
298  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }
299
300  // Is the current thread the exclusive holder of the ReaderWriterMutex.
301  bool IsExclusiveHeld(const Thread* self) const;
302
303  // Assert the current thread has exclusive access to the ReaderWriterMutex.
304  void AssertExclusiveHeld(const Thread* self) {
305    if (kDebugLocking && (gAborting == 0)) {
306      CHECK(IsExclusiveHeld(self)) << *this;
307    }
308  }
309  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }
310
311  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
312  void AssertNotExclusiveHeld(const Thread* self) {
313    if (kDebugLocking && (gAborting == 0)) {
314      CHECK(!IsExclusiveHeld(self)) << *this;
315    }
316  }
317  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }
318
319  // Is the current thread a shared holder of the ReaderWriterMutex.
320  bool IsSharedHeld(const Thread* self) const;
321
322  // Assert the current thread has shared access to the ReaderWriterMutex.
323  void AssertSharedHeld(const Thread* self) {
324    if (kDebugLocking && (gAborting == 0)) {
325      // TODO: we can only assert this well when self != NULL.
326      CHECK(IsSharedHeld(self) || self == NULL) << *this;
327    }
328  }
329  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
330
331  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
332  // mode.
333  void AssertNotHeld(const Thread* self) {
334    if (kDebugLocking && (gAborting == 0)) {
335      CHECK(!IsSharedHeld(self)) << *this;
336    }
337  }
338
339  // Id associated with exclusive owner.
340  uint64_t GetExclusiveOwnerTid() const;
341
342  virtual void Dump(std::ostream& os) const;
343
344 private:
345#if ART_USE_FUTEXES
346  // -1 implies held exclusive, +ve shared held by state_ many owners.
347  volatile int32_t state_;
348  // Exclusive owner.
349  volatile uint64_t exclusive_owner_;
350  // Pending readers.
351  volatile int32_t num_pending_readers_;
352  // Pending writers.
353  AtomicInteger num_pending_writers_;
354#else
355  pthread_rwlock_t rwlock_;
356#endif
357  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
358};
359
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  // Permanently associates this condition variable with the given guard mutex.
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  // Wake all waiters (Broadcast) or a single waiter (Signal); self is the calling thread.
  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  void TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come into to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
397
398// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
399// upon destruction.
400class SCOPED_LOCKABLE MutexLock {
401 public:
402  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
403    mu_.ExclusiveLock(self_);
404  }
405
406  ~MutexLock() UNLOCK_FUNCTION() {
407    mu_.ExclusiveUnlock(self_);
408  }
409
410 private:
411  Thread* const self_;
412  Mutex& mu_;
413  DISALLOW_COPY_AND_ASSIGN(MutexLock);
414};
415// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
416#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name)
417
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  // NOTE(review): annotated EXCLUSIVE_LOCK_FUNCTION although only a *shared* hold is taken
  // (SharedLock below); SHARED_LOCK_FUNCTION would describe this more accurately — confirm
  // the thread-safety analysis in use supports shared acquisition on scoped lockables before
  // changing it.
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;      // Thread on whose behalf the share is held.
  ReaderWriterMutex& mu_;   // The guarded mutex; read-held for this object's lifetime.
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name)
439
440// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
441// construction and releases it upon destruction.
442class SCOPED_LOCKABLE WriterMutexLock {
443 public:
444  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
445      self_(self), mu_(mu) {
446    mu_.ExclusiveLock(self_);
447  }
448
449  ~WriterMutexLock() UNLOCK_FUNCTION() {
450    mu_.ExclusiveUnlock(self_);
451  }
452
453 private:
454  Thread* const self_;
455  ReaderWriterMutex& mu_;
456  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
457};
458// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
459// "WriterMutexLock mu(lock)".
460#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)
461
// Global mutexes corresponding to the levels above. All members are static pointers,
// presumably created by Init() — confirm Init() is called during runtime startup before any
// of these are dereferenced.
class Locks {
 public:
  static void Init();

  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
  // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_
  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
  // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
  // the mutator lock doesn't necessarily allow the exclusive user (e.g the garbage collector)
  // chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                  | Exclusive user
  // (holding mutator lock and in kRunnable state) |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended                    |   .. blocked ..
  // x: Release share on mutator_lock_             | Carry out exclusive access
  // Acquire thread_suspend_count_lock_            |   .. exclusive ..
  // while Thread::suspend_count_ > 0              |   .. exclusive ..
  //   - wait on Thread::resume_cond_              |   .. exclusive ..
  //     (releases thread_suspend_count_lock_)     |   .. exclusive ..
  //   .. waiting ..                               | Release mutator_lock_
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Acquire share on mutator_lock_                |  .. running ..
  //  - This could block but the thread still      |  .. running ..
  //    has a state of kSuspended and so this      |  .. running ..
  //    isn't an issue.                            |  .. running ..
  // Acquire thread_suspend_count_lock_            |  .. running ..
  //  - we poll here as we're transitioning into   |  .. running ..
  //    kRunnable and an individual thread suspend |  .. running ..
  //    request (e.g for debugging) won't try      |  .. running ..
  //    to acquire the mutator lock (which would   |  .. running ..
  //    block as we hold the mutator lock). This   |  .. running ..
  //    poll ensures that if the suspender thought |  .. running ..
  //    we were suspended by incrementing our      |  .. running ..
  //    Thread::suspend_count_ and then reading    |  .. running ..
  //    our state we go back to waiting on         |  .. running ..
  //    Thread::resume_cond_.                      |  .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0 |  .. running ..
  // Release thread_suspend_count_lock_            |  .. running ..
  // if can_go_runnable                            |  .. running ..
  //   Change state to kRunnable                   |  .. running ..
  // else                                          |  .. running ..
  //   Goto x                                      |  .. running ..
  //  .. running ..                                |  .. running ..
  static ReaderWriterMutex* mutator_lock_;

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards breakpoints.
  static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards deoptimization requests.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // Guards trace requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Guards profile objects.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(profiler_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};
572
573}  // namespace art
574
575#endif  // ART_RUNTIME_BASE_MUTEX_H_
576