mutex.h revision 2cd334ae2d4287216523882f0d298cf3901b7ab1
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE ReaderWriterMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle-free
// partial ordering and thereby causes deadlock situations to fail checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kBreakpointInvokeLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);

const bool kDebugLocking = kIsDebugBuild;
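// Illustrative sketch of the hierarchy check (not part of the original header; the mutexes and
// the surrounding code are hypothetical). With kDebugLocking enabled, acquiring a mutex whose
// level is greater than or equal to the level of a mutex the thread already holds fails a check:
//
//   Mutex trace_lock("trace lock", kTraceLock);        // Higher level, must be taken first.
//   Mutex logging_lock("logging lock", kLoggingLock);  // Lower level, taken while trace is held.
//
//   trace_lock.ExclusiveLock(self);
//   logging_lock.ExclusiveLock(self);    // OK: kLoggingLock < kTraceLock.
//   logging_lock.ExclusiveUnlock(self);
//   trace_lock.ExclusiveUnlock(self);
//
// Acquiring the two in the opposite order would trip the lock-level check in debug builds.
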
// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
//
//  State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
//  Free      | Exclusive     | error
//  Exclusive | Block*        | Free
//
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Returns true if it acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex?
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with the exclusive owner. No memory ordering semantics if called from a thread
  // other than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
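// Illustrative usage sketch (not part of the original header; queue_lock_ and self are
// hypothetical). A Mutex is constructed once with a name and a LockLevel, then taken and
// released around the state it guards, either directly as below or via the scoped MutexLock
// helper declared further down:
//
//   Mutex queue_lock_("pending queue lock", kDefaultMutexLevel);
//   ...
//   queue_lock_.ExclusiveLock(self);
//   queue_lock_.AssertHeld(self);        // Debug-build check that this thread owns the lock.
//   ... read or write the guarded state ...
//   queue_lock_.ExclusiveUnlock(self);
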
// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
//  State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
//  Free      | Exclusive     | error           | SharedLock(1)    | error
//  Exclusive | Block         | Free            | Block            | error
//  Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
//
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex. Returns true on success, false otherwise.
  bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex?
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex?
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != NULL.
      CHECK(IsSharedHeld(self) || self == NULL) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with the exclusive owner. No memory ordering semantics if called from a thread
  // other than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
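// Illustrative usage sketch (not part of the original header; map_lock_ and self are
// hypothetical). Any number of readers may hold a share at once, while a writer needs exclusive
// access:
//
//   ReaderWriterMutex map_lock_("map lock");
//   ...
//   map_lock_.SharedLock(self);          // Take a reader share.
//   map_lock_.AssertSharedHeld(self);
//   ... read the guarded state ...
//   map_lock_.SharedUnlock(self);
//
//   map_lock_.ExclusiveLock(self);       // Blocks until all shares and any writer are released.
//   ... mutate the guarded state ...
//   map_lock_.ExclusiveUnlock(self);
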
// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  // pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that
  // sequence_ changed and doesn't enter the wait. Modified while holding guard_, but is read by
  // futex wait without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
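// Illustrative usage sketch (not part of the original header; queue_lock_, queue_cond_,
// work_available and self are hypothetical). The guard Mutex must be held around Wait(), and the
// predicate is re-checked in a loop because a wait may wake without the condition holding:
//
//   Mutex queue_lock_("queue lock");
//   ConditionVariable queue_cond_("queue condition", queue_lock_);
//   bool work_available = false;
//
//   // Consumer:
//   queue_lock_.ExclusiveLock(self);
//   while (!work_available) {
//     queue_cond_.Wait(self);            // Releases queue_lock_ while asleep, re-acquires it.
//   }
//   ... consume ...
//   queue_lock_.ExclusiveUnlock(self);
//
//   // Producer:
//   queue_lock_.ExclusiveLock(self);
//   work_available = true;
//   queue_cond_.Signal(self);            // Or Broadcast(self) to wake all waiters.
//   queue_lock_.ExclusiveUnlock(self);
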
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
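// Illustrative usage sketch (not part of the original header; queue_lock_, map_lock_ and self
// are hypothetical). The scoped lockers release in their destructors, so every exit path from
// the block unlocks correctly:
//
//   {
//     MutexLock mu(self, queue_lock_);         // ExclusiveLock on a Mutex.
//     ... guarded work ...
//   }                                          // ExclusiveUnlock when mu goes out of scope.
//
//   {
//     ReaderMutexLock rmu(self, map_lock_);    // SharedLock on a ReaderWriterMutex.
//     ... read-only work ...
//   }
//
//   {
//     WriterMutexLock wmu(self, map_lock_);    // ExclusiveLock on a ReaderWriterMutex.
//     ... mutating work ...
//   }
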
// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it
  // asks the mutators to suspend themselves, which also involves usage of the
  // thread_suspend_count_lock_ to cover weaknesses in using ReaderWriterMutexes with
  // ConditionVariables. We use a condition variable to wait upon in the suspension logic as
  // releasing and then re-acquiring a share on the mutator lock doesn't necessarily give the
  // exclusive user (e.g. the garbage collector) a chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                   | Exclusive user
  // (holding mutator lock and in kRunnable state)  |   .. running ..
  // .. running ..                                  | Request thread suspension by:
  // .. running ..                                  |   - acquiring thread_suspend_count_lock_
  // .. running ..                                  |   - incrementing Thread::suspend_count_ on
  // .. running ..                                  |     all mutator threads
  // .. running ..                                  |   - releasing thread_suspend_count_lock_
  // .. running ..                                  | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full     |   .. blocked ..
  // suspend code.                                  |   .. blocked ..
  // Change state to kSuspended                     |   .. blocked ..
  // x: Release share on mutator_lock_              | Carry out exclusive access
  // Acquire thread_suspend_count_lock_             |   .. exclusive ..
  // while Thread::suspend_count_ > 0               |   .. exclusive ..
  //   - wait on Thread::resume_cond_               |   .. exclusive ..
  //     (releases thread_suspend_count_lock_)      |   .. exclusive ..
  // .. waiting ..                                  | Release mutator_lock_
  // .. waiting ..                                  | Request thread resumption by:
  // .. waiting ..                                  |   - acquiring thread_suspend_count_lock_
  // .. waiting ..                                  |   - decrementing Thread::suspend_count_ on
  // .. waiting ..                                  |     all mutator threads
  // .. waiting ..                                  |   - notifying on Thread::resume_cond_
  //   - re-acquire thread_suspend_count_lock_      |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_             |   .. running ..
  // Acquire share on mutator_lock_                 |   .. running ..
  //   - This could block but the thread still      |   .. running ..
  //     has a state of kSuspended and so this      |   .. running ..
  //     isn't an issue.                            |   .. running ..
  // Acquire thread_suspend_count_lock_             |   .. running ..
  //   - we poll here as we're transitioning into   |   .. running ..
  //     kRunnable and an individual thread suspend |   .. running ..
  //     request (e.g. for debugging) won't try     |   .. running ..
  //     to acquire the mutator lock (which would   |   .. running ..
  //     block as we hold the mutator lock). This   |   .. running ..
  //     poll ensures that if the suspender thought |   .. running ..
  //     we were suspended by incrementing our      |   .. running ..
  //     Thread::suspend_count_ and then reading    |   .. running ..
  //     our state we go back to waiting on         |   .. running ..
  //     Thread::resume_cond_.                      |   .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0  |   .. running ..
  // Release thread_suspend_count_lock_             |   .. running ..
  // if can_go_runnable                             |   .. running ..
  //   Change state to kRunnable                    |   .. running ..
  // else                                           |   .. running ..
  //   Goto x                                       |   .. running ..
  // .. running ..                                  |   .. running ..
  static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
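  // Illustrative sketch (not part of the original header; MyComponent and my_lock_ are
  // hypothetical). A mutex declared elsewhere in the runtime picks up this default ordering by
  // appending the macro to its declaration; the lock itself is still constructed with a name in
  // the owning class's constructor:
  //
  //   class MyComponent {
  //     ...
  //     Mutex my_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  //   };
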
  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guards the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards the intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards the reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards the cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards the weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards the finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards the phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards the soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_