mutex.cc revision 1d6ee090fddd4bfd35c304d6ceb929d5c529dfcc
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
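// Computes result_ts = lhs - rhs, normalizing tv_nsec into [0, one second). Returns true if
// the difference is negative, i.e. the deadline in lhs has already passed relative to rhs.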
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

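// Scoped guard for the global mutex set. It spins with a CAS on an atomic word rather than
// using a Mutex, since Mutex construction and destruction themselves need to take this guard.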
class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == nullptr) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == nullptr) {
      // No mutexes have been created yet at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == nullptr) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We expect waits to happen while holding the thread list suspend thread lock.
        if (held_mutex != nullptr && i != kThreadListSuspendThreadLock) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count << " times,"
         << " total wait of contender " << PrettyDuration(wait_time)
         << ", average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
        << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
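    // Futex path: state_ is 0 when unlocked and 1 when held. Waiters are counted separately
    // in num_contenders_ so that unlock can tell whether a FUTEX_WAKE is needed.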
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Weak CAS may fail spuriously.
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
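  // Futex path: state_ is 0 when free, -1 when held exclusively by a writer, and a positive
  // count of the shared (reader) holders otherwise.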
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
        // EAGAIN and EINTR both indicate a spurious failure: loop back, recompute the relative
        // timeout from now, and try again. We don't use TEMP_FAILURE_RETRY so we can recompute
        // rel_ts.
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the guard mutex. Each waiter keeps the mutex's contender count
      // raised, ensuring that unlocking the mutex will wake the requeued waiter thread.
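      // FUTEX_CMP_REQUEUE wakes no waiters (third argument 0) and requeues up to INT32_MAX of
      // them onto guard_.state_; the count is passed through the timeout parameter, and the
      // kernel fails with EAGAIN if sequence_ no longer equals cur_sequence.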
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this; however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
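  // Sampling sequence_ before releasing the guard closes the lost-wakeup race: if a Signal or
  // Broadcast bumps sequence_ after the unlock, the FUTEX_WAIT below returns immediately with
  // EAGAIN instead of blocking.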
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
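  // FUTEX_WAIT interprets its timeout as a relative duration, so build rel_ts as a relative
  // timespec here rather than an absolute deadline.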
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(alloc_tracker_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_list_suspend_thread_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kThreadListSuspendThreadLock;
    DCHECK(thread_list_suspend_thread_lock_ == nullptr);
    thread_list_suspend_thread_lock_ =
        new Mutex("thread list suspend thread by .. lock", current_lock_level);

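    // Each lock created below must sit strictly below its predecessor in the hierarchy; the
    // macro enforces this, so the creation order itself documents the lock ordering.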
    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      if (new_level >= current_lock_level) { \
        /* Do not use CHECKs or FATAL here, abort_lock_ is not set up yet. */ \
        fprintf(stderr, "New lock level %d is not less than current level %d\n", \
                new_level, current_lock_level); \
        exit(1); \
      } \
      current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
    DCHECK(alloc_tracker_lock_ == nullptr);
    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
    DCHECK(reference_processor_lock_ == nullptr);
    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
    reference_queue_cleared_references_lock_ =
        new Mutex("ReferenceQueue cleared references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
    DCHECK(reference_queue_weak_references_lock_ == nullptr);
    reference_queue_weak_references_lock_ =
        new Mutex("ReferenceQueue weak references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
    reference_queue_finalizer_references_lock_ =
        new Mutex("ReferenceQueue finalizer references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
    reference_queue_phantom_references_lock_ =
        new Mutex("ReferenceQueue phantom references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
    DCHECK(reference_queue_soft_references_lock_ == nullptr);
    reference_queue_soft_references_lock_ =
        new Mutex("ReferenceQueue soft references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL
  }
}

}  // namespace art