mutex.cc revision 73d1e17b3afc7d5e56184f90bf819dc64956448a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;

struct AllMutexData {
  // A guard around all_mutexes that's not itself a Mutex (acquirers must CAS to take it and
  // busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif
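
// ComputeRelativeTimeSpec stores lhs - rhs into *result_ts with nanosecond borrow/carry
// handling, and returns true when the difference is negative, i.e. the deadline lhs has
// already passed. Illustrative arithmetic (values invented for the example):
// lhs = {2s, 100ns}, rhs = {1s, 900ns} gives tv_nsec = -800 < 0, so we borrow one second:
// result = {0s, 999999200ns}, and the function returns false.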

class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareAndSwap(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareAndSwap(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};
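
// ScopedAllMutexesLock is a minimal RAII spin lock: the constructor busy-waits, CASing the
// guard word from 0 to the owning mutex's address, and the destructor CASes it back. It
// cannot itself be a Mutex because it guards the registry that Mutexes register with during
// construction. A sketch of the usage pattern:
//
//   {
//     ScopedAllMutexesLock mu(this);   // Spin until we own the guard.
//     ... touch gAllMutexData->all_mutexes ...
//   }                                  // Destructor releases the guard.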

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet during startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}
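
// Illustrative output shape (names and figures invented for the example; each line comes
// from Mutex::Dump or ReaderWriterMutex::Dump below):
//
//   Mutex logging:
//   (Contended)
//   non-recursive thread list lock level=6 rec=0 owner=0 contended 3 total wait of
//   contender 1ms average 333us
//   (Never contended)
//   recursive logging lock level=1 rec=0 owner=0 never contended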

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        if (held_mutex != NULL) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

inline void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    uint64_t new_val, old_val;
    volatile int64_t* addr = reinterpret_cast<volatile int64_t*>(&wait_time);
    volatile const int64_t* caddr = const_cast<volatile const int64_t*>(addr);
    do {
      old_val = static_cast<uint64_t>(QuasiAtomic::Read64(caddr));
      new_val = old_val + value;
    } while (!QuasiAtomic::Cas64(static_cast<int64_t>(old_val), static_cast<int64_t>(new_val), addr));
  }
}
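
// The loop above is a classic compare-and-swap add: read the current 64-bit value, compute
// the sum, and retry if another thread changed the word in between. With C++11 <atomic>
// (not used here; shown only as a hedged equivalent sketch) the same effect would be:
//
//   std::atomic<uint64_t> wait_time;
//   wait_time.fetch_add(value, std::memory_order_relaxed);
//
// QuasiAtomic is used instead to provide 64-bit atomic reads and CAS on 32-bit targets
// where they aren't natively available.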

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contetion_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry;
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry;
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareAndSwap(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count = 1;
    }
  }
}
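
// RecordContention keeps a fixed-size ring buffer of (blocked_tid, owner_tid) pairs: if the
// current slot already describes the same pair, its count is bumped; otherwise the cursor is
// advanced with a CAS and the new slot is overwritten. Entries are therefore lossy samples,
// which is acceptable for the diagnostics-only use noted above.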

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contetion_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time;
    uint32_t contention_count = data->contention_count;
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count;
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}

Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  state_ = 0;
  exclusive_owner_ = 0;
  num_contenders_ = 0;
#elif defined(__BIONIC__) || defined(__APPLE__)
  // Use recursive mutexes on bionic and Apple: their non-recursive mutexes don't record
  // owner TIDs, which we need in order to check lock ownership.
  pthread_mutexattr_t attributes;
  CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE));
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL));
#endif
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_, 0) << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1.
        done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(&state_, FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
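
// Futex-backed Mutex state machine (a summary of the code above, not a spec): state_ == 0
// means unlocked and state_ == 1 means held. Acquisition CASes 0 -> 1; if the lock is
// already held, the thread bumps num_contenders_ and sleeps in futex(FUTEX_WAIT) until a
// releasing thread CASes 1 -> 0 and issues FUTEX_WAKE (see ExclusiveUnlock below).
// FUTEX_WAIT rechecks that state_ is still 1 before sleeping, so a wake that lands between
// the failed CAS and the wait cannot be lost.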

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (cur_state == 0) {
        // Change state from 0 to 1.
        done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (LIKELY(cur_state == 1)) {
        QuasiAtomic::MembarStoreStore();
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0.
        done = __sync_bool_compare_and_swap(&state_, cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Spurious fail?
          // Wake a contender.
          if (UNLIKELY(num_contenders_ > 0)) {
            futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
    QuasiAtomic::MembarStoreLoad();
#else
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}
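
// Illustrative usage, assuming the RAII guard MutexLock declared in mutex.h (it is what
// this file itself uses in ~Mutex above):
//
//   {
//     MutexLock mu(self, *Locks::thread_list_lock_);  // Calls ExclusiveLock(self).
//     ... critical section ...
//   }                                                 // Calls ExclusiveUnlock(self).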

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL));
#endif
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_, 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_, 0);
  CHECK_EQ(num_pending_writers_, 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1.
      done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      num_pending_writers_++;
      if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      num_pending_writers_--;
    }
  } while (!done);
  DCHECK_EQ(state_, -1);
  exclusive_owner_ = SafeGetTid(self);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0.
      done = __sync_bool_compare_and_swap(&state_, -1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // cmpxchg may fail due to noise?
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_ > 0)) {
          futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

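// ReaderWriterMutex state encoding, as used by the futex paths above and below: state_ == 0
// means free, state_ == -1 means held exclusively by a writer, and state_ == N > 0 means
// held shared by N readers. Writers CAS 0 -> -1; readers CAS N -> N + 1 (see SharedTryLock
// below). The unlock path wakes all pending readers and writers, since either class of
// waiter may now be able to proceed.
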
#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_;
    if (cur_state == 0) {
      // Change state from 0 to -1.
      done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      num_pending_writers_++;
      if (futex(&state_, FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          num_pending_writers_--;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure; the loop recomputes the
          // relative timeout from now and tries again. We don't use TEMP_FAILURE_RETRY
          // so we can recompute rel_ts.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      num_pending_writers_--;
    }
  } while (!done);
  exclusive_owner_ = SafeGetTid(self);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (cur_state >= 0) {
      // Add as an extra reader.
      done = __sync_bool_compare_and_swap(&state_, cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  sequence_ = 0;
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_;
      // Requeue waiters onto the mutex. The waiters hold the mutex's contender count high,
      // ensuring that unlocks of the mutex will wake the requeued waiter threads.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   &guard_.state_, cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}
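
// A note on the FUTEX_CMP_REQUEUE call above: for this operation the kernel reinterprets
// the timeout-pointer argument as the number of waiters to requeue, so passing INT32_MAX
// cast to a timespec* means "requeue everyone". Waiters are moved from waiting on
// sequence_ to waiting on guard_.state_, and the cur_sequence comparison makes the kernel
// fail with EAGAIN if another broadcast raced in, in which case we retry.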

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this, however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_;
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_, 0);
  guard_.num_contenders_--;
#else
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
#endif
  guard_.recursion_count_ = old_recursion_count;
}
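
// Why bump guard_.num_contenders_ before sleeping: a Broadcast requeues this thread onto
// guard_.state_ without it ever calling Mutex::ExclusiveLock, so the mutex's own contender
// count would otherwise be zero and Mutex::ExclusiveUnlock would skip the FUTEX_WAKE that
// the requeued waiter needs. Holding the count high for the duration of the wait closes
// that gap.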

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_;
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_, 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    DCHECK(abort_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(runtime_shutdown_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
  } else {
    logging_lock_ = new Mutex("logging lock", kLoggingLock, true);
    abort_lock_ = new Mutex("abort lock", kAbortLock, true);

    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      kClassLinkerClassesLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", kRuntimeShutdownLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", kThreadListLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", kTraceLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", kProfilerLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", kInternTableLock);
  }
}
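
// Each lock is created at a distinct LockLevel (declared in mutex.h); the kDebugLocking
// machinery uses these levels to flag lock-order violations, as in CheckSafeToWait above.
// Illustrative call, assuming runtime startup runs Locks::Init before any of the Locks::*
// pointers are dereferenced:
//
//   Locks::Init();
//   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);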

}  // namespace art