// mutex.cc revision f3d874c60ee3ada19ce26a5c4e532312b6f3a9e9
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include <limits>
#include <set>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif
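
// Worked example for ComputeRelativeTimeSpec: lhs = {5 s, 100 ns} minus
// rhs = {3 s, 200 ns} borrows from the seconds field, giving
// result_ts = {1 s, 999999900 ns} and returning false. If rhs were later than
// lhs, tv_sec would go negative and the function would return true, which the
// callers below treat as "deadline already passed".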
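// ScopedAllMutexesLock cannot itself be a Mutex, since BaseMutex's constructor and
// destructor need to take it; instead it spins on all_mutexes_guard with a weak CAS
// and NanoSleep backoff. DumpAll, which holds no mutex of its own, passes the
// sentinel value reinterpret_cast<const BaseMutex*>(-1).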
class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We expect waits to happen while holding the thread list suspend thread lock.
        if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}

Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
        << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
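
// Callers normally acquire through the scoped MutexLock guard from mutex.h rather
// than calling ExclusiveLock()/ExclusiveUnlock() directly. A minimal sketch (the
// choice of lock here is illustrative only):
//   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
//   // ... critical section; the lock is released when mu goes out of scope.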

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Weak CAS may fail spuriously.
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}
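
// With ART_USE_FUTEXES the rwlock state is a single int32_t: 0 means free, -1 means
// held exclusively by one writer, and a value N > 0 means N readers hold shared
// access (see the CAS transitions in ExclusiveLock and SharedTryLock below).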

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure; for those we loop,
          // recompute the relative timeout from now, and try again. We don't use
          // TEMP_FAILURE_RETRY so we can recompute rel_ts.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
#endif
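
// Illustrative call pattern (the lock and timeout chosen here are examples only,
// not taken from a caller in this file):
//   if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 10 /* ms */, 0 /* ns */)) {
//     // The deadline passed without acquiring the lock; back off or report.
//   }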

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}
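
// As with Mutex, callers typically take this lock through the scoped guards declared
// in mutex.h (ReaderMutexLock for shared access, WriterMutexLock for exclusive
// access) rather than calling SharedLock()/ExclusiveLock() directly, e.g.
//   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);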

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the guard mutex. Each waiter holds the mutex's contender
      // count high, ensuring that unlocks of the mutex will wake the requeued waiter thread.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}
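
// Design note: FUTEX_CMP_REQUEUE transfers the sleeping waiters onto guard_'s futex
// queue rather than waking them all at once, so each successive unlock of guard_
// releases one requeued waiter. This avoids a thundering herd of threads waking up
// only to contend again on guard_.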

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex-wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this contention, but requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check that something was woken, or else we changed sequence_ before the waiters had a
    // chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed; check that it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke, so we no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}
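
// Waits should be wrapped in a predicate loop by the caller: futex and pthread
// condition waits can return spuriously, and Wait() makes no guarantee that the
// condition holds on return. A sketch of the expected pattern (names illustrative):
//   MutexLock mu(self, lock);
//   while (!condition) {
//     cond.Wait(self);
//   }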

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke, so we no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_list_suspend_thread_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kThreadListSuspendThreadLock;
    DCHECK(thread_list_suspend_thread_lock_ == nullptr);
    thread_list_suspend_thread_lock_ =
        new Mutex("thread list suspend thread by .. lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      DCHECK_LT(new_level, current_lock_level); \
      current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL
  }
}

}  // namespace art