mutex.cc revision f4cb036808b88fe60d71a705b2744284155cbc01
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif
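
// Worked example of the arithmetic above (illustrative values): with
// lhs = {5s, 100ns} and rhs = {3s, 200ns}, the raw subtraction gives
// {2s, -100ns}, which the borrow normalizes to {1s, 999999900ns}, and the
// function returns false. With lhs = {3s, 0} and rhs = {5s, 0} the result
// is {-2s, 0} and the function returns true, i.e. the deadline has already
// passed.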

class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};
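
// ScopedAllMutexesLock is a classic compare-and-swap spin lock, used here
// because it must work before and while Mutex instances themselves are being
// created. A minimal free-standing sketch of the same pattern, assuming
// std::atomic in place of art::Atomic:
//
//   std::atomic<const void*> guard{nullptr};
//
//   void Acquire(const void* token) {
//     const void* expected = nullptr;
//     while (!guard.compare_exchange_weak(expected, token,
//                                         std::memory_order_acquire)) {
//       expected = nullptr;  // compare_exchange_weak updated it on failure.
//       NanoSleep(100);      // Back off briefly instead of spinning hot.
//     }
//   }
//
//   void Release(const void* token) {
//     guard.store(nullptr, std::memory_order_release);  // token must match.
//   }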

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We expect waits to happen while holding the thread list suspend thread lock.
        if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}
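
// The do/while above is the usual lock-free way to claim a ring-buffer slot:
// each racing thread CASes the cursor forward and owns exactly the slot its
// CAS moved the cursor to. The same pattern in a free-standing sketch,
// assuming std::atomic in place of art::Atomic:
//
//   std::atomic<uint32_t> cursor{0};
//
//   uint32_t ClaimSlot() {
//     uint32_t slot, next;
//     do {
//       slot = cursor.load(std::memory_order_relaxed);
//       next = (slot + 1) % kContentionLogSize;
//     } while (!cursor.compare_exchange_weak(slot, next,
//                                            std::memory_order_relaxed));
//     return next;  // The claimed slot, matching log[new_slot] above.
//   }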

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " times, total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
        << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, so block on the futex.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
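
// Callers rarely invoke ExclusiveLock/ExclusiveUnlock directly; the usual
// idiom is the MutexLock RAII helper from mutex.h. An illustrative use
// (the lock name and level are made up for the example):
//
//   Mutex cache_lock("cache lock", kDefaultMutexLevel);
//   {
//     MutexLock mu(Thread::Current(), cache_lock);  // ExclusiveLock here.
//     // ... read or write the state guarded by cache_lock ...
//   }  // ExclusiveUnlock in ~MutexLock, even on early return.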

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Weak CAS may fail spuriously.
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, so block on the futex.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
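
// To summarize the futex-based encoding used by the ReaderWriterMutex code
// above and in mutex-inl.h: state_ == 0 means free, state_ == -1 means
// write-held, and state_ == n > 0 means held by n readers. Writers CAS
// 0 -> -1, readers CAS n -> n + 1 for n >= 0, and blocked threads
// FUTEX_WAIT on state_ until a releasing thread issues a FUTEX_WAKE.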

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, so block on the futex until the deadline.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure; for those we
          // loop back, recompute the relative timeout from now, and try again.
          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
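
// An illustrative non-blocking use of SharedTryLock (the lock name and the
// fallback are made up for the example):
//
//   if (map_lock.SharedTryLock(self)) {
//     // ... read-only access to the state guarded by map_lock ...
//     map_lock.SharedUnlock(self);
//   } else {
//     // A writer holds map_lock; fall back or retry later.
//   }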

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the mutex. Each waiter holds the mutex's contender count high,
      // ensuring that unlocks of the mutex will wake the requeued waiter threads.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}
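
// In the FUTEX_CMP_REQUEUE call above, the third argument (0) is how many
// waiters to wake directly and the INT32_MAX smuggled through the timespec
// pointer is how many to requeue onto guard_.state_. The final argument,
// cur_sequence, makes the kernel fail with EAGAIN if sequence_ changed
// between our load and the syscall, in which case the loop retries.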

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this; however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken, or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed; check that it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}
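
// As with any condition variable, callers must re-check their predicate in a
// loop: both the futex and pthread paths can wake spuriously, and Broadcast
// requeues waiters onto the guard mutex rather than guaranteeing the
// condition still holds. An illustrative pattern (the names are made up for
// the example):
//
//   MutexLock mu(self, queue_lock);
//   while (queue.empty()) {
//     queue_cv.Wait(self);  // Releases and re-acquires queue_lock.
//   }
//   Task* task = queue.front();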

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out; we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}
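
// Locks::Init below creates the global locks in strictly decreasing LockLevel
// order, giving ART its lock hierarchy: with kDebugLocking, a thread may only
// acquire a mutex whose level is lower than those it already holds (checked
// in RegisterAsLocked). For example, a thread holding
// Locks::thread_list_lock_ may take Locks::thread_suspend_count_lock_, but
// not the reverse, which rules out lock-order deadlocks between the two.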

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(alloc_tracker_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_list_suspend_thread_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kThreadListSuspendThreadLock;
    DCHECK(thread_list_suspend_thread_lock_ == nullptr);
    thread_list_suspend_thread_lock_ =
        new Mutex("thread list suspend thread by .. lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      if (new_level >= current_lock_level) { \
        /* Do not use CHECKs or FATAL here, abort_lock_ is not set up yet. */ \
        fprintf(stderr, "New lock level %d is not less than current level %d\n", \
                new_level, current_lock_level); \
        exit(1); \
      } \
      current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
    DCHECK(alloc_tracker_lock_ == nullptr);
    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL
  }
}


}  // namespace art