mutex.cc revision 68d8b42ddec39ec0174162d90d4abaa004d1983e
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes_ that's not itself a Mutex (it is acquired by CAS and busy-waiting).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
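// Computes *result_ts = lhs - rhs, normalizing tv_nsec into the range [0, 1s). Returns true
// if the difference is negative, i.e. lhs is earlier than rhs, which callers treat as the
// deadline having already passed. For example, {5s, 100ns} - {3s, 200ns} yields
// {1s, 999999900ns} and returns false.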
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

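// A minimal scoped spin lock over gAllMutexData's guard word: the constructor CAS-loops the
// guard from null to this mutex's address, sleeping 100ns between attempts, and the destructor
// CAS-loops it back to null. A real Mutex can't be used here because constructing one would
// register it in the very set this guard protects.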
class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We expect waits to happen while holding the thread list suspend thread lock.
        if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
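    // contention_log is a fixed-size ring buffer indexed by cur_content_log_entry: if the
    // current slot already records this (blocked, owner) pair we just bump its count;
    // otherwise we race to advance the cursor and overwrite the next slot.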
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " times, total wait of contender " << PrettyDuration(wait_time)
         << ", average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}

Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
        << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
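    // state_ is 0 when unlocked and 1 when held. Contenders CAS 0 -> 1 to acquire; on failure
    // they publish themselves in num_contenders_ and sleep in FUTEX_WAIT until an unlock's
    // FUTEX_WAKE (or a spurious wakeup) lets them retry.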
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Weak CAS may fail spuriously.
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

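// In the futex-based ReaderWriterMutex, state_ encodes the lock mode: 0 means unlocked, -1
// means exclusively (write) held, and n > 0 means held by n readers. Pending readers and
// writers count themselves while sleeping on state_'s futex so that unlockers know whether a
// wakeup is needed.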
void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure; for those we loop and recompute
          // the relative timeout from now. We don't use TEMP_FAILURE_RETRY so we can recompute
          // rel_ts. Anything else is a real error.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the guard mutex. Each waiter holds the mutex's contender count
      // high, ensuring that unlocks of the mutex will wake the requeued waiter threads.
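      // FUTEX_CMP_REQUEUE wakes val (here 0) waiters and requeues up to val2 waiters from
      // sequence_'s futex onto guard_.state_'s futex, provided sequence_ still equals
      // cur_sequence (otherwise it fails with EAGAIN and we retry). The kernel takes val2
      // through the timeout argument slot, hence the timespec* cast of INT32_MAX.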
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex-wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue it to avoid this contention, but requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
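  // (Broadcast() moves waiters from sequence_'s futex onto guard_.state_'s futex, but
  // Mutex::ExclusiveUnlock only issues a FUTEX_WAKE when num_contenders_ > 0, so the count
  // must stay raised for the duration of the wait.)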
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke, so we no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke, so we no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(jni_libraries_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_list_suspend_thread_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
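    // Each lock's LockLevel records its place in the acquisition order: in debug builds a
    // thread may, in general, only acquire a lock whose level is strictly lower than the
    // levels of the locks it already holds, which rules out ordering cycles. The
    // UPDATE_CURRENT_LOCK_LEVEL macro below checks that each successive level is lower than
    // the previous one.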
    LockLevel current_lock_level = kThreadListSuspendThreadLock;
    DCHECK(thread_list_suspend_thread_lock_ == nullptr);
    thread_list_suspend_thread_lock_ =
        new Mutex("thread list suspend thread by .. lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      DCHECK_LT(new_level, current_lock_level); \
      current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
    DCHECK(jni_libraries_lock_ == nullptr);
    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL
  }
}

}  // namespace art