mutex.cc revision ef7d42fca18c16fbaf103822ad16f23246e2905d
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mutex.h"
18
19#include <errno.h>
20#include <sys/time.h>
21
22#include "atomic.h"
23#include "base/logging.h"
24#include "mutex-inl.h"
25#include "runtime.h"
26#include "scoped_thread_state_change.h"
27#include "thread-inl.h"
28#include "utils.h"
29
30namespace art {
31
32#if ART_USE_FUTEXES
// Computes |result_ts| = |lhs| - |rhs| with tv_nsec normalized into [0, 10^9).
// Returns true when the resulting interval is negative (normalized tv_sec < 0),
// i.e. the deadline |lhs| lies in the past relative to |rhs|.
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  // Work in total nanoseconds, then split back into (sec, nsec) using a
  // floor-style division so the nanosecond part ends up non-negative.
  int64_t diff = (static_cast<int64_t>(lhs.tv_sec) - rhs.tv_sec) * one_sec +
                 (static_cast<int64_t>(lhs.tv_nsec) - rhs.tv_nsec);
  int64_t sec = diff / one_sec;
  int64_t nsec = diff % one_sec;
  if (nsec < 0) {
    sec -= 1;
    nsec += one_sec;
  }
  result_ts->tv_sec = sec;
  result_ts->tv_nsec = nsec;
  return result_ts->tv_sec < 0;
}
46#endif
47
// Registry of every BaseMutex ever constructed; populated only when lock
// contention logging (kLogLockContentions) is enabled.
struct AllMutexData {
  // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes guarded by all_mutexes_guard_.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
// NOTE(review): all accesses go through gAllMutexData-> (pointer decay), so only
// element [0] is ever used; presumably the array exists for padding/alignment —
// confirm against the declaration of kAllMutexDataSize.
static struct AllMutexData gAllMutexData[kAllMutexDataSize];
56
57class ScopedAllMutexesLock {
58 public:
59  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
60    while (!gAllMutexData->all_mutexes_guard.CompareAndSwap(0, mutex)) {
61      NanoSleep(100);
62    }
63  }
64  ~ScopedAllMutexesLock() {
65    while (!gAllMutexData->all_mutexes_guard.CompareAndSwap(mutex_, 0)) {
66      NanoSleep(100);
67    }
68  }
69 private:
70  const BaseMutex* const mutex_;
71};
72
73BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
74  if (kLogLockContentions) {
75    ScopedAllMutexesLock mu(this);
76    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
77    if (*all_mutexes_ptr == NULL) {
78      // We leak the global set of all mutexes to avoid ordering issues in global variable
79      // construction/destruction.
80      *all_mutexes_ptr = new std::set<BaseMutex*>();
81    }
82    (*all_mutexes_ptr)->insert(this);
83  }
84}
85
// Removes this mutex from the global registry (counterpart of the constructor's
// insert); only relevant when contention logging is enabled.
BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}
92
93void BaseMutex::DumpAll(std::ostream& os) {
94  if (kLogLockContentions) {
95    os << "Mutex logging:\n";
96    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
97    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
98    if (all_mutexes == NULL) {
99      // No mutexes have been created yet during at startup.
100      return;
101    }
102    typedef std::set<BaseMutex*>::const_iterator It;
103    os << "(Contended)\n";
104    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
105      BaseMutex* mutex = *it;
106      if (mutex->HasEverContended()) {
107        mutex->Dump(os);
108        os << "\n";
109      }
110    }
111    os << "(Never contented)\n";
112    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
113      BaseMutex* mutex = *it;
114      if (!mutex->HasEverContended()) {
115        mutex->Dump(os);
116        os << "\n";
117      }
118    }
119  }
120}
121
// Debug check that it is safe for |self| to block on this mutex: the thread
// must actually hold it (or it is the monitor lock), and must hold no other
// mutex at any level, since waiting while holding unrelated locks risks
// deadlock. Fatal (CHECK) on violation when kDebugLocking is enabled.
void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    // Unattached threads carry no held-mutex bookkeeping; only the level check applies.
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    // Scan every other lock level; report each mutex still held before aborting.
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        if (held_mutex != NULL) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}
145
// Accumulates |value| nanoseconds of blocked time into wait_time using a
// 64-bit CAS loop, since a plain += on a 64-bit field is not atomic on all
// supported 32-bit targets.
inline void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    uint64_t new_val, old_val;
    volatile int64_t* addr = reinterpret_cast<volatile int64_t*>(&wait_time);
    volatile const int64_t* caddr = const_cast<volatile const int64_t*>(addr);
    do {
      // Re-read, re-add, and retry until no other thread raced the update.
      old_val = static_cast<uint64_t>(QuasiAtomic::Read64(caddr));
      new_val = old_val + value;
    } while (!QuasiAtomic::Cas64(static_cast<int64_t>(old_val), static_cast<int64_t>(new_val), addr));
  }
}
158
159void BaseMutex::RecordContention(uint64_t blocked_tid,
160                                 uint64_t owner_tid,
161                                 uint64_t nano_time_blocked) {
162  if (kLogLockContentions) {
163    ContentionLogData* data = contetion_log_data_;
164    ++(data->contention_count);
165    data->AddToWaitTime(nano_time_blocked);
166    ContentionLogEntry* log = data->contention_log;
167    // This code is intentionally racy as it is only used for diagnostics.
168    uint32_t slot = data->cur_content_log_entry;
169    if (log[slot].blocked_tid == blocked_tid &&
170        log[slot].owner_tid == blocked_tid) {
171      ++log[slot].count;
172    } else {
173      uint32_t new_slot;
174      do {
175        slot = data->cur_content_log_entry;
176        new_slot = (slot + 1) % kContentionLogSize;
177      } while (!data->cur_content_log_entry.CompareAndSwap(slot, new_slot));
178      log[new_slot].blocked_tid = blocked_tid;
179      log[new_slot].owner_tid = owner_tid;
180      log[new_slot].count = 1;
181    }
182  }
183}
184
// Prints a contention summary for this mutex: total contention count, average
// wait, and (from the sampled circular log) the most frequently blocked thread
// and the most frequent owner during contention. Reads are unsynchronized —
// acceptable for diagnostics.
void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contetion_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    // Snapshot the counters once up front.
    uint64_t wait_time = data->wait_time;
    uint32_t contention_count = data->contention_count;
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " times, average wait of contender " << PrettyDuration(wait_time / contention_count);
      // Tally per-tid totals from the sampled log entries.
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      typedef SafeMap<uint64_t, size_t>::const_iterator It;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count;
        if (count > 0) {
          It it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      // Report the thread that was blocked most often (tid 0 means "none seen").
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (It it = most_common_blocked.begin(); it != most_common_blocked.end(); ++it) {
        if (it->second > max_tid_count) {
          max_tid = it->first;
          max_tid_count = it->second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      // Report the thread that owned the mutex most often during contention.
      max_tid = 0;
      max_tid_count = 0;
      for (It it = most_common_blocker.begin(); it != most_common_blocker.end(); ++it) {
        if (it->second > max_tid_count) {
          max_tid = it->first;
          max_tid_count = it->second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}
243
244
// Constructs a mutex. A recursive mutex may be re-acquired by the thread that
// already owns it; a non-recursive one will abort on re-acquisition in debug
// builds. Three backends: futex (preferred on Linux), pthread with recursive
// attribute (bionic/Apple), or plain pthread.
Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  // Futex backend: state_ 0 = unlocked, 1 = exclusively held.
  state_ = 0;
  exclusive_owner_ = 0;
  num_contenders_ = 0;
#elif defined(__BIONIC__) || defined(__APPLE__)
  // Use recursive mutexes for bionic and Apple otherwise the
  // non-recursive mutexes don't have TIDs to check lock ownership of.
  pthread_mutexattr_t attributes;
  CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE));
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL));
#endif
}
263
// Destroys the mutex. Destroying a still-locked mutex is fatal, except during
// runtime shutdown where suspended daemon threads may legitimately still hold
// locks — then it is only a warning.
Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    // Unlocked: sanity-check that no stale owner or contender remains.
    CHECK_EQ(exclusive_owner_, 0U)  << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_, 0) << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}
288
// Acquires the mutex for |self|, blocking until available. For a recursive
// mutex already held by |self| this only bumps recursion_count_.
void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1.
        done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        // Record contention for diagnostics, then sleep in the kernel until
        // an unlocker issues a FUTEX_WAKE (num_contenders_ tells the unlocker
        // somebody is waiting).
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(&state_, FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    // Full barrier so loads in the critical section cannot move above the acquire.
    QuasiAtomic::MembarStoreLoad();
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
331
// Attempts to acquire the mutex without blocking. Returns true on success
// (including recursive re-entry on a recursive mutex), false if another
// thread holds it.
bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (cur_state == 0) {
        // Change state from 0 to 1.
        done = __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, 1 /* new state */);
      } else {
        // Held by someone else: fail immediately rather than wait.
        return false;
      }
    } while (!done);
    // Acquire-style barrier, mirroring ExclusiveLock.
    QuasiAtomic::MembarStoreLoad();
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}
372
// Releases the mutex. On a recursive mutex the underlying lock is only
// released when recursion_count_ drops to zero.
void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == 1)) {
      // Release-style barrier before clearing owner/state.
      QuasiAtomic::MembarStoreStore();
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state to 0.
      done =  __sync_bool_compare_and_swap(&state_, cur_state, 0 /* new state */);
      if (LIKELY(done)) {  // Spurious fail?
        // Wake a contender
        if (UNLIKELY(num_contenders_ > 0)) {
          futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
      }
    } else {
      // Logging acquires the logging lock, avoid infinite recursion in that case.
      if (this != Locks::logging_lock_) {
        LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
      } else {
        // Bypass LOG(FATAL) (which would re-enter this mutex) and abort directly.
        LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
        LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                               cur_state, name_).c_str());
        _exit(1);
      }
    }
  } while (!done);
  QuasiAtomic::MembarStoreLoad();
#else
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}
417
418void Mutex::Dump(std::ostream& os) const {
419  os << (recursive_ ? "recursive " : "non-recursive ")
420      << name_
421      << " level=" << static_cast<int>(level_)
422      << " rec=" << recursion_count_
423      << " owner=" << GetExclusiveOwnerTid() << " ";
424  DumpContention(os);
425}
426
// Stream-insertion convenience: delegates to Mutex::Dump.
std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}
431
// Constructs a reader-writer mutex. Futex backend encodes the lock in state_:
// 0 = free, -1 = write-held, >0 = number of readers.
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL));
#endif
}
442
// Destroys the reader-writer mutex; it must be unlocked with no waiters.
ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_, 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_, 0);
  CHECK_EQ(num_pending_writers_, 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}
463
// Acquires the write (exclusive) lock, blocking while any reader or another
// writer holds it.
void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1.
      done =  __sync_bool_compare_and_swap(&state_, 0 /* cur_state*/, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      // Wait on the observed state; FUTEX_WAIT returns immediately (EAGAIN) if
      // state_ changed since we read cur_state.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      num_pending_writers_++;
      if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      num_pending_writers_--;
    }
  } while (!done);
  DCHECK_EQ(state_, -1);
  exclusive_owner_ = SafeGetTid(self);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}
496
// Releases the write (exclusive) lock and wakes all pending readers/writers.
void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0.
      done =  __sync_bool_compare_and_swap(&state_, -1 /* cur_state*/, 0 /* new state */);
      if (LIKELY(done)) {  // cmpxchg may fail due to noise?
        // Wake any waiters.
        // FUTEX_WAKE with -1 wakes everyone; readers and writers then race to
        // re-acquire.
        if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_ > 0)) {
          futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
524
525#if HAVE_TIMED_RWLOCK
// Attempts to acquire the write lock, giving up after the given timeout
// (relative |ms| milliseconds + |ns| nanoseconds from now, measured against
// CLOCK_REALTIME). Returns true on acquisition, false on timeout.
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  // Convert the relative timeout to an absolute deadline once, up front.
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_;
    if (cur_state == 0) {
      // Change state from 0 to -1.
      done =  __sync_bool_compare_and_swap(&state_, 0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      // Recompute the remaining relative time from the absolute deadline; a
      // negative remainder means the deadline already passed.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      num_pending_writers_++;
      if (futex(&state_, FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          num_pending_writers_--;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure,
          // recompute the relative time out from now and try again.
          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts;
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      num_pending_writers_--;
    }
  } while (!done);
  exclusive_owner_ = SafeGetTid(self);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  // NOTE(review): this asserts shared (not exclusive) ownership even though we
  // just took the write lock. IsSharedHeld is also satisfied by the exclusive
  // holder, so this passes, but AssertExclusiveHeld would be the tighter check —
  // confirm the non-futex owner-tid bookkeeping supports it before changing.
  AssertSharedHeld(self);
  return true;
}
578#endif
579
580bool ReaderWriterMutex::SharedTryLock(Thread* self) {
581  DCHECK(self == NULL || self == Thread::Current());
582#if ART_USE_FUTEXES
583  bool done = false;
584  do {
585    int32_t cur_state = state_;
586    if (cur_state >= 0) {
587      // Add as an extra reader.
588      done =  __sync_bool_compare_and_swap(&state_, cur_state, cur_state + 1);
589    } else {
590      // Owner holds it exclusively.
591      return false;
592    }
593  } while (!done);
594#else
595  int result = pthread_rwlock_tryrdlock(&rwlock_);
596  if (result == EBUSY) {
597    return false;
598  }
599  if (result != 0) {
600    errno = result;
601    PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
602  }
603#endif
604  RegisterAsLocked(self);
605  AssertSharedHeld(self);
606  return true;
607}
608
609bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
610  DCHECK(self == NULL || self == Thread::Current());
611  bool result;
612  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
613    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
614  } else {
615    result = (self->GetHeldMutex(level_) == this);
616  }
617  return result;
618}
619
620void ReaderWriterMutex::Dump(std::ostream& os) const {
621  os << name_
622      << " level=" << static_cast<int>(level_)
623      << " owner=" << GetExclusiveOwnerTid() << " ";
624  DumpContention(os);
625}
626
// Stream-insertion convenience: delegates to ReaderWriterMutex::Dump.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}
631
// Constructs a condition variable bound to |guard|, the mutex callers must
// hold around Wait/Signal/Broadcast. Futex backend tracks a wake sequence
// counter and the number of waiters.
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  sequence_ = 0;
  num_waiters_ = 0;
#else
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, NULL));
#endif
}
641
// Destroys the condition variable. Waiters still present are fatal unless the
// runtime is shutting down (suspended daemon threads may still be waiting).
ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_!= 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}
663
// Wakes all waiters. Caller must hold guard_. The futex backend requeues
// waiters onto the guard mutex's futex instead of waking them, so they wake
// one-by-one as the mutex is released rather than all stampeding at once.
void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_;
      // Requeue waiters onto mutex. The waiter holds the contender count on the mutex high ensuring
      // mutex unlocks will awaken the requeued waiter thread.
      // For FUTEX_CMP_REQUEUE the timespec* argument slot actually carries
      // val2, the max number of waiters to requeue (here "all"); cur_sequence
      // guards against a concurrent sequence_ change (EAGAIN -> retry).
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   &guard_.state_, cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}
691
// Wakes at most one waiter. Caller must hold guard_.
void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake 1 waiter who will then come and in contend on mutex. It'd be nice to requeue them
    // to avoid this, however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}
708
// Blocks until signalled. Verifies no other mutexes are held before delegating
// to WaitHoldingLocks, which skips that safety check.
void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}
713
// Blocks until signalled, without checking that other locks are unheld.
// Caller must hold guard_; it is released during the wait and re-acquired
// before returning, with its recursion count restored.
void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  // Snapshot sequence_ before unlocking; FUTEX_WAIT below returns immediately
  // if a Signal/Broadcast bumps it in the window between unlock and wait.
  int32_t cur_sequence = sequence_;
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_, 0);
  guard_.num_contenders_--;
#else
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
#endif
  guard_.recursion_count_ = old_recursion_count;
}
745
// Blocks until signalled or until the relative timeout (|ms| milliseconds +
// |ns| nanoseconds) elapses. Caller must hold guard_; like WaitHoldingLocks,
// the guard is released during the wait and re-acquired afterwards. Gives no
// indication of whether it returned due to timeout or wake-up.
void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  // FUTEX_WAIT takes a relative timeout.
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  // Snapshot sequence_ before unlocking so a racing Signal/Broadcast makes the
  // futex wait return immediately instead of missing the wake-up.
  int32_t cur_sequence = sequence_;
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_, 0);
  guard_.num_contenders_--;
#else
#ifdef HAVE_TIMEDWAIT_MONOTONIC
#define TIMEDWAIT pthread_cond_timedwait_monotonic
  int clock = CLOCK_MONOTONIC;
#else
#define TIMEDWAIT pthread_cond_timedwait
  int clock = CLOCK_REALTIME;
#endif
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(TIMEDWAIT(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
#endif
  guard_.recursion_count_ = old_recursion_count;
}
794
795}  // namespace art
796