mutex.cc revision 719d1a33f6569864f529e5a3fff59e7bca97aad0
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes that's not a Mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareAndSwap(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareAndSwap(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};
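
// ScopedAllMutexesLock is a raw CAS spin lock rather than a Mutex because it guards the
// bookkeeping that Mutex construction/destruction itself relies on; using a Mutex here would
// recurse. Illustrative use (mirroring the BaseMutex constructor below):
//
//   {
//     ScopedAllMutexesLock mu(this);             // Spin until the guard word CASes from 0 to us.
//     gAllMutexData->all_mutexes->insert(this);  // Mutate the global set safely.
//   }                                            // Destructor CASes the guard word back to 0.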

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        if (held_mutex != NULL) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

inline void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    uint64_t new_val, old_val;
    volatile int64_t* addr = reinterpret_cast<volatile int64_t*>(&wait_time);
    volatile const int64_t* caddr = const_cast<volatile const int64_t*>(addr);
    do {
      old_val = static_cast<uint64_t>(QuasiAtomic::Read64(caddr));
      new_val = old_val + value;
    } while (!QuasiAtomic::Cas64(static_cast<int64_t>(old_val), static_cast<int64_t>(new_val), addr));
  }
}
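
// The CAS loop in AddToWaitTime is a hand-rolled 64-bit fetch-and-add, needed because a plain
// 64-bit add is not atomic on all supported 32-bit targets. With C++11 atomics (illustrative
// only; not what this code uses) the equivalent would be roughly:
//
//   std::atomic<uint64_t> wait_time;
//   wait_time.fetch_add(value, std::memory_order_relaxed);  // Hypothetical modern equivalent.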

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contetion_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry;
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry;
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareAndSwap(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count = 1;
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contetion_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time;
    uint32_t contention_count = data->contention_count;
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " times, average wait of contender " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      typedef SafeMap<uint64_t, size_t>::const_iterator It;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count;
        if (count > 0) {
          It it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (It it = most_common_blocked.begin(); it != most_common_blocked.end(); ++it) {
        if (it->second > max_tid_count) {
          max_tid = it->first;
          max_tid_count = it->second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (It it = most_common_blocker.begin(); it != most_common_blocker.end(); ++it) {
        if (it->second > max_tid_count) {
          max_tid = it->first;
          max_tid_count = it->second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}
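
// For illustration, DumpContention emits everything on one line; for a contended lock the output
// looks roughly like (tids and counts invented):
//
//   contended 42 times, average wait of contender 1.5ms sample shows most blocked tid=1234
//   sample shows tid=5678 owning during this time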

Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  state_ = 0;
  exclusive_owner_ = 0;
  num_contenders_ = 0;
#elif defined(__BIONIC__) || defined(__APPLE__)
  // Use recursive mutexes for bionic and Apple; otherwise the non-recursive mutexes don't
  // record TIDs that we can use to check lock ownership.
  pthread_mutexattr_t attributes;
  CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE));
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL));
#endif
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_, 0) << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? This could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1.
        done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(&state_, FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
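
// On the futex path a Mutex's state_ word encodes the whole lock: 0 means unlocked and 1 means
// locked, with num_contenders_ counting sleepers. A sketch of the protocol implemented by
// ExclusiveLock above and ExclusiveUnlock below (simplified):
//
//   lock:   if CAS(state_, 0, 1) succeeds, done; else num_contenders_++, FUTEX_WAIT on
//           state_ == 1, num_contenders_--, retry.
//   unlock: exclusive_owner_ = 0; CAS(state_, 1, 0); if num_contenders_ > 0, FUTEX_WAKE one.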

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (cur_state == 0) {
        // Change state from 0 to 1.
        done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (LIKELY(cur_state == 1)) {
        QuasiAtomic::MembarStoreStore();
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0.
        done = __sync_bool_compare_and_swap(&state_, cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Spurious fail?
          // Wake a contender.
          if (UNLIKELY(num_contenders_ > 0)) {
            futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
#else
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
     << name_
     << " level=" << static_cast<int>(level_)
     << " rec=" << recursion_count_
     << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL));
#endif
}
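
// A ReaderWriterMutex's futex state_ encodes readers and the writer in one word: 0 means free,
// a positive value counts the shared (reader) holders, and -1 means exclusively (writer) held.
// So SharedTryLock further below CASes state_ upward from any value >= 0, while ExclusiveLock
// CASes 0 -> -1 and otherwise futex-waits on the observed value.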

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_, 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_, 0);
  CHECK_EQ(num_pending_writers_, 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? This could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1.
      done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      num_pending_writers_++;
      if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      num_pending_writers_--;
    }
  } while (!done);
  DCHECK_EQ(state_, -1);
  exclusive_owner_ = SafeGetTid(self);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0.
      done = __sync_bool_compare_and_swap(&state_, -1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // cmpxchg may fail due to noise?
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_ > 0)) {
          futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
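
// Note that ExclusiveUnlock above does not hand the lock to a specific waiter: it clears state_
// and issues a FUTEX_WAKE, after which the awoken pending readers and writers simply race on
// state_ again (readers can succeed together by incrementing it; a writer needs to CAS 0 -> -1).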

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_;
    if (cur_state == 0) {
      // Change state from 0 to -1.
      done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      num_pending_writers_++;
      if (futex(&state_, FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          num_pending_writers_--;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure,
          // recompute the relative time out from now and try again.
          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      num_pending_writers_--;
    }
  } while (!done);
  exclusive_owner_ = SafeGetTid(self);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (cur_state >= 0) {
      // Add as an extra reader.
      done = __sync_bool_compare_and_swap(&state_, cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
     << " level=" << static_cast<int>(level_)
     << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  sequence_ = 0;
  num_waiters_ = 0;
#else
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, NULL));
#endif
}
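
// The futex-based ConditionVariable is built from two words: sequence_, a counter bumped by
// Signal/Broadcast, and num_waiters_. Waiters sample sequence_ while holding guard_, release
// guard_, then FUTEX_WAIT on sequence_ still holding the sampled value; any bump between the
// sample and the wait makes the futex call return immediately (EAGAIN), so notifications made
// while the guard is held cannot be lost.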

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
                                         << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_;
      // Requeue waiters onto the guard mutex. Each waiter holds the mutex's contender count
      // high, ensuring that unlocks of the mutex will awaken the requeued waiter threads.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   &guard_.state_, cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex-wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this; however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}
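
// WaitHoldingLocks below bumps guard_.num_contenders_ before sleeping. This pairs with
// Broadcast's FUTEX_CMP_REQUEUE: threads requeued from sequence_ onto guard_.state_ look, to
// the guard Mutex, like ordinary contenders, so Mutex::ExclusiveUnlock's "num_contenders_ > 0"
// check still issues the FUTEX_WAKE that lets a requeued waiter finish reacquiring the guard.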

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_;
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_, 0);
  guard_.num_contenders_--;
#else
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_;
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_, 0);
  guard_.num_contenders_--;
#else
#ifdef HAVE_TIMEDWAIT_MONOTONIC
#define TIMEDWAIT pthread_cond_timedwait_monotonic
  int clock = CLOCK_MONOTONIC;
#else
#define TIMEDWAIT pthread_cond_timedwait
  int clock = CLOCK_REALTIME;
#endif
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(TIMEDWAIT(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
#endif
  guard_.recursion_count_ = old_recursion_count;
}
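
// The two TimedWait paths above express the deadline differently: the futex path computes a
// relative timespec (InitTimeSpec(false, ...)) because FUTEX_WAIT takes a relative timeout,
// while the pthread path computes an absolute deadline (InitTimeSpec(true, ...)) for
// pthread_cond_timedwait, preferring CLOCK_MONOTONIC where pthread_cond_timedwait_monotonic
// is available.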

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    DCHECK(abort_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(runtime_shutdown_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
  } else {
    logging_lock_ = new Mutex("logging lock", kLoggingLock, true);
    abort_lock_ = new Mutex("abort lock", kAbortLock, true);

    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      kClassLinkerClassesLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", kRuntimeShutdownLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", kThreadListLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", kTraceLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", kProfilerLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", kInternTableLock);
  }
}

}  // namespace art