mutex.cc revision 37f3c968ecd04e77802fe17bb82dabc07de21ca1
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes that's not itself a Mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

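// Computes result_ts = lhs - rhs, normalizing tv_nsec into [0, one second).
// Returns true if the result is negative, i.e. the absolute deadline lhs has
// already passed relative to the current time rhs.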
#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

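// A spin-lock style guard over gAllMutexData->all_mutexes_guard. The CAS token
// is the BaseMutex being registered or unregistered, so this works from within
// BaseMutex's own constructor and destructor without requiring a real Mutex.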
class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet; we are still at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

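// Verifies that the calling thread may safely block on this mutex: it must
// hold this mutex (or this must be the monitor lock), and it must hold no
// other mutex at any other level, since blocking while holding an unrelated
// lock invites lock-order deadlocks.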
void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        if (held_mutex != NULL) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

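// The contention log is a small fixed-size ring buffer updated with relaxed
// atomics; under races, entries may be lost or attributed to a stale slot.
// This is acceptable since the log is only used for diagnostics.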
void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count << " times,"
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
        << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

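// With ART_USE_FUTEXES, state_ is 0 (unlocked) or 1 (held). The fast path is
// a single acquire-CAS; on contention the thread raises num_contenders_ and
// sleeps in FUTEX_WAIT until an unlocking thread issues a FUTEX_WAKE. Without
// futexes we defer entirely to pthread_mutex_t.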
void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Spurious fail?
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

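// Usage sketch (illustrative only, not part of this file): rather than calling
// ExclusiveLock/ExclusiveUnlock directly, callers normally use the scoped
// MutexLock guard (used elsewhere in this file, declared in mutex.h):
//
//   {
//     MutexLock mu(Thread::Current(), *Locks::trace_lock_);
//     // trace_lock_ is held, and registered with the thread, until the end
//     // of the scope.
//   }
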
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure; on those we loop
          // and recompute the relative timeout from now. We don't use
          // TEMP_FAILURE_RETRY so we can recompute rel_ts.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

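// Usage sketch (illustrative only): shared and exclusive holds are normally
// taken via scoped guards; assuming the ReaderMutexLock/WriterMutexLock
// guards declared alongside MutexLock in mutex.h:
//
//   {
//     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);  // shared hold
//   }
//   {
//     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);  // exclusive hold
//   }
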
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

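// With ART_USE_FUTEXES, sequence_ is the futex word for this condition
// variable. A waiter records itself in num_waiters_ and in the guard mutex's
// contender count, so Broadcast can FUTEX_CMP_REQUEUE sleepers from sequence_
// onto the guard's state_ word, where the guard's unlock path wakes them one
// at a time as the mutex becomes free.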
void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the guard mutex. Each waiter holds the mutex's contender count
      // high, ensuring that unlocks of the mutex will awaken the requeued waiter threads.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this, however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

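// Wait/WaitHoldingLocks release the guard mutex, sleep on sequence_, and
// re-acquire the guard on wake-up. The guard's contender count is raised
// before unlocking so that a waiter requeued by Broadcast is guaranteed a
// wake-up from the guard's unlock path.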
void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

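// Locks::Init creates the global locks in strictly decreasing LockLevel
// order; UPDATE_CURRENT_LOCK_LEVEL DCHECKs each step so a newly added lock
// cannot silently violate the hierarchy that lock-level checking relies on.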
void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kMutatorLock;
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
        DCHECK_LT(new_level, current_lock_level); \
        current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL
  }
}

}  // namespace art