mutex.cc revision eb0a179508f3c0533dd7db86ec7ab9dfa3773256
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "atomic.h"
#include "base/logging.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif
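
// Worked example (an illustration, not from the original source): for
// lhs = {tv_sec: 7, tv_nsec: 100000000} and rhs = {tv_sec: 5, tv_nsec: 900000000},
// the nanosecond difference is negative, so a second is borrowed, yielding
// {tv_sec: 1, tv_nsec: 200000000}. The function returns true only when the
// computed difference is negative, i.e. lhs is earlier than rhs (already timed out).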

class ScopedAllMutexesLock FINAL {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }

  ~ScopedAllMutexesLock() {
#if !defined(__clang__)
    // TODO: remove this workaround for the GCC/libc++/bionic bug "invalid failure memory model".
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakSequentiallyConsistent(mutex_, 0)) {
#else
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
#endif
      NanoSleep(100);
    }
  }

 private:
  const BaseMutex* const mutex_;
};
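
// Note: this guard deliberately is not a Mutex (see the comment on
// all_mutexes_guard above); it must be usable from within BaseMutex's own
// constructor and destructor, so the CAS-plus-NanoSleep spin above is the
// entire locking protocol.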

// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder FINAL : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : NULL),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATRACE_ENABLED()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATRACE_BEGIN(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    ATRACE_END();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};
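
// Instances are stack-allocated around the futex waits in the slow paths
// below, so the recorded contention duration runs from first blocking until
// wakeup.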

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet during startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We expect waits to happen while holding the thread list suspend thread lock.
        if (held_mutex != NULL) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held);
    }
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    if (level_ != kMonitorLock) {
      // Only check the lock level for non-monitor locks since we may still have java threads
      // waiting on monitors.
      CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
          << "unexpectedly found a contender on mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
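
// Callers normally acquire through the MutexLock RAII helper (used elsewhere in
// this file) rather than calling ExclusiveLock()/ExclusiveUnlock() directly,
// e.g.:
//   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
//   // ... critical section; released when mu goes out of scope.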

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
    std::string name1 = "<null>";
    std::string name2 = "<null>";
    if (self != nullptr) {
      self->GetThreadName(name1);
    }
    if (Thread::Current() != nullptr) {
      Thread::Current()->GetThreadName(name2);
    }
    LOG(FATAL) << name1 << " " << name2;
  }
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Spurious fail?
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL,
                              StringPrintf("Unexpected state_ %d in unlock for %s",
                                           cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}
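
// As with Mutex, callers normally use the RAII helpers declared alongside this
// class (WriterMutexLock for the exclusive side, ReaderMutexLock for the shared
// side) instead of calling the lock/unlock methods directly.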

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure,
          // recompute the relative timeout from now and try again.
          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

#if ART_USE_FUTEXES
void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
  // Owner holds it exclusively, hang up.
  ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
  ++num_pending_readers_;
  if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
    if (errno != EAGAIN) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  --num_pending_readers_;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid()
#if ART_USE_FUTEXES
      << " state=" << state_.LoadSequentiallyConsistent()
      << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent()
      << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent()
#endif
      << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto mutex. The waiter holds the contender count on the mutex high,
      // ensuring that mutex unlocks will awaken the requeued waiter thread.
      // The timeout argument carries val2 for FUTEX_CMP_REQUEUE: wake none, requeue up to
      // INT32_MAX waiters onto guard_'s futex.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this, however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

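// Protocol sketch for the futex-based wait below: the waiter samples sequence_,
// releases guard_, then FUTEX_WAITs for sequence_ to change. Signal() bumps
// sequence_ and wakes one waiter; Broadcast() bumps it and requeues all waiters
// onto guard_'s futex. Waiters pre-increment guard_.num_contenders_ so that a
// later unlock of guard_ issues the FUTEX_WAKE a requeued waiter needs.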
void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require wakeups from guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  bool timed_out = false;
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
      timed_out = true;
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require wakeups from guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc == ETIMEDOUT) {
    timed_out = true;
  } else if (rc != 0) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
  return timed_out;
}

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(alloc_tracker_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
    DCHECK(jni_libraries_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kInstrumentEntrypointsLock;
    DCHECK(instrument_entrypoints_lock_ == nullptr);
    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      if (new_level >= current_lock_level) { \
        /* Do not use CHECKs or FATAL here, abort_lock_ is not set up yet. */ \
        fprintf(stderr, "New lock level %d is not less than current level %d\n", \
                new_level, current_lock_level); \
        exit(1); \
      } \
      current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
    DCHECK(alloc_tracker_lock_ == nullptr);
    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
    DCHECK(jni_libraries_lock_ == nullptr);
    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
    DCHECK(reference_processor_lock_ == nullptr);
    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
    reference_queue_cleared_references_lock_ =
        new Mutex("ReferenceQueue cleared references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
    DCHECK(reference_queue_weak_references_lock_ == nullptr);
    reference_queue_weak_references_lock_ =
        new Mutex("ReferenceQueue weak references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
    reference_queue_finalizer_references_lock_ =
        new Mutex("ReferenceQueue finalizer references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
    reference_queue_phantom_references_lock_ =
        new Mutex("ReferenceQueue phantom references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
    DCHECK(reference_queue_soft_references_lock_ == nullptr);
    reference_queue_soft_references_lock_ =
        new Mutex("ReferenceQueue soft references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL
  }
}
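
// Locks::Init() is expected to run once, early in runtime startup, before most
// threads exist; the strictly descending level order established above is what
// the kDebugLocking checks (e.g. CheckSafeToWait()) validate at run time.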


}  // namespace art