// mutex.cc revision 3eed93dd5be03e5539827bebf0f414251a12e15e
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif
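
// Worked example for ComputeRelativeTimeSpec (illustrative): with
// lhs = {tv_sec = 5, tv_nsec = 200000000} and
// rhs = {tv_sec = 3, tv_nsec = 900000000}, the raw difference is
// {2, -700000000}; borrowing one second normalizes it to {1, 300000000}
// and the function returns false (deadline not yet reached). A negative
// tv_sec after normalization means the deadline has already passed, which
// is why callers treat a true return as a timeout.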

class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};
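
// The guard above is a hand-rolled spin lock: the constructor CAS-swings
// all_mutexes_guard from 0 to the mutex's address and backs off with
// NanoSleep(100) on failure. A minimal sketch of the same pattern using
// C++11 std::atomic (illustrative only; this file uses ART's Atomic<>):
//
//   std::atomic<const void*> guard{nullptr};
//   const void* expected = nullptr;
//   while (!guard.compare_exchange_weak(expected, owner,
//                                       std::memory_order_acquire)) {
//     expected = nullptr;  // compare_exchange_weak updated it on failure.
//     NanoSleep(100);
//   }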

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        if (held_mutex != NULL) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}
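
// Example of what CheckSafeToWait rejects (illustrative; the names below are
// hypothetical): waiting while any other lock level is held would let a
// suspended wait pin that lock and deadlock its other users, so the check
// above LOGs each offending mutex and then CHECK-fails.
//
//   other_lock.ExclusiveLock(self);   // Some unrelated lock stays held...
//   cv_guard.ExclusiveLock(self);
//   cv.Wait(self);                    // ...so CheckSafeToWait CHECK-fails here.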

inline void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    uint64_t new_val, old_val;
    volatile int64_t* addr = reinterpret_cast<volatile int64_t*>(&wait_time);
    volatile const int64_t* caddr = const_cast<volatile const int64_t*>(addr);
    do {
      old_val = static_cast<uint64_t>(QuasiAtomic::Read64(caddr));
      new_val = old_val + value;
    } while (!QuasiAtomic::Cas64(static_cast<int64_t>(old_val), static_cast<int64_t>(new_val), addr));
  }
}
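
// With C++11 atomics the read/CAS loop above collapses to a single relaxed
// fetch_add; a minimal equivalent sketch (illustrative only, this code
// predates std::atomic use here):
//
//   std::atomic<uint64_t> wait_time{0};
//   wait_time.fetch_add(value, std::memory_order_relaxed);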

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time;
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}

Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  state_ = 0;
  exclusive_owner_ = 0;
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#elif defined(__BIONIC__) || defined(__APPLE__)
  // Use recursive mutexes for bionic and Apple, since otherwise the
  // non-recursive mutexes don't record an owning TID for lock ownership checks.
  pthread_mutexattr_t attributes;
  CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE));
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL));
#endif
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_.LoadRelaxed(), 0)
        << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1.
        done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(&state_, FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (cur_state == 0) {
        // Change state from 0 to 1.
        done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (LIKELY(cur_state == 1)) {
        QuasiAtomic::MembarStoreStore();
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0.
        done = __sync_bool_compare_and_swap(&state_, cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Spurious fail?
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
#else
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}
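
// Usage sketch (illustrative): callers normally take a Mutex through the
// MutexLock RAII helper used elsewhere in this file, pairing ExclusiveLock()
// in its constructor with ExclusiveUnlock() in its destructor. The lock
// level shown is an assumption for the example.
//
//   Mutex lock("example lock", kDefaultMutexLevel);
//   {
//     MutexLock mu(Thread::Current(), lock);
//     // ... critical section ...
//   }  // Unlocked when mu goes out of scope.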

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL));
#endif
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_, 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_, 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1.
      done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      num_pending_writers_++;
      if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      num_pending_writers_--;
    }
  } while (!done);
  DCHECK_EQ(state_, -1);
  exclusive_owner_ = SafeGetTid(self);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0.
      done = __sync_bool_compare_and_swap(&state_, -1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // cmpxchg may fail due to noise?
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_.LoadRelaxed() > 0)) {
          futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
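
// Note: unlike Mutex::ExclusiveUnlock, which wakes a single contender, the
// writer unlock above passes -1 as the FUTEX_WAKE count; per the "Wake any
// waiters" comment the intent is a broad wake-up, since all pending readers
// can make progress at once and any pending writer must re-race for the
// lock anyway.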

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_;
    if (cur_state == 0) {
      // Change state from 0 to -1.
      done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      num_pending_writers_++;
      if (futex(&state_, FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          num_pending_writers_--;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure; on those we
          // loop again and recompute the relative timeout from now, which is
          // why we don't use TEMP_FAILURE_RETRY here. Anything else is fatal.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      num_pending_writers_--;
    }
  } while (!done);
  exclusive_owner_ = SafeGetTid(self);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (cur_state >= 0) {
      // Add as an extra reader.
      done = __sync_bool_compare_and_swap(&state_, cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}
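
// Usage sketch (illustrative, assuming the ReaderMutexLock and
// WriterMutexLock RAII helpers declared in mutex.h):
//
//   ReaderWriterMutex rw("example rw lock", kDefaultMutexLevel);
//   {
//     ReaderMutexLock mu(Thread::Current(), rw);  // Shared: many readers.
//     // ... read-only access ...
//   }
//   {
//     WriterMutexLock mu(Thread::Current(), rw);  // Exclusive: one writer.
//     // ... mutating access ...
//   }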

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the guard mutex. Each waiter keeps the mutex's
      // contender count raised, ensuring that mutex unlocks will wake the
      // requeued waiter threads.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   &guard_.state_, cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}
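
// FUTEX_CMP_REQUEUE above atomically rechecks sequence_ against
// cur_sequence (retrying on EAGAIN when a racing signal changed it), wakes
// zero waiters, and moves the rest onto the guard mutex's futex word. The
// requeued threads then wake one at a time as guard_ is unlocked, avoiding
// a thundering herd in which every waiter wakes only to block on guard_.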

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex-wake one waiter, who will then come in and contend on the mutex.
    // It'd be nice to requeue them to avoid this; however, requeueing can
    // only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed; check that it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
#endif
  guard_.recursion_count_ = old_recursion_count;
}
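
// Usage sketch (illustrative): waits belong in a loop that rechecks the
// predicate, since the futex wait above can return spuriously (EAGAIN or
// EINTR) and Signal/Broadcast only indicate that the condition may have
// changed:
//
//   MutexLock mu(self, lock);  // "lock" is the cv's guard_.
//   while (!ready) {
//     cv.Wait(self);           // Releases and reacquires lock around the wait.
//   }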

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out; we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kMutatorLock;
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
        DCHECK_LT(new_level, current_lock_level); \
        current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL
  }
}
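
// Lock-ordering sketch (illustrative): because the locks above are created
// in strictly decreasing LockLevel order, kDebugLocking builds can verify
// acquisition order at run time. For example, acquiring thread_list_lock_
// (a higher level) while already holding thread_suspend_count_lock_ (a
// lower level) would trip the per-level checks when the lock is registered
// as held.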

}  // namespace art
918