mutex.cc revision 5869a2c27ee0dbd7b420614c76ff14a706f0c5fb
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mutex.h"
18
19#include <errno.h>
20#include <sys/time.h>
21
22#include "atomic.h"
23#include "base/logging.h"
24#include "mutex-inl.h"
25#include "runtime.h"
26#include "scoped_thread_state_change.h"
27#include "thread-inl.h"
28#include "utils.h"
29
30namespace art {
31
32Mutex* Locks::abort_lock_ = nullptr;
33Mutex* Locks::alloc_tracker_lock_ = nullptr;
34Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
35Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
36ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
37ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
38Mutex* Locks::deoptimization_lock_ = nullptr;
39ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
40Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
41Mutex* Locks::intern_table_lock_ = nullptr;
42Mutex* Locks::jni_libraries_lock_ = nullptr;
43Mutex* Locks::logging_lock_ = nullptr;
44Mutex* Locks::mem_maps_lock_ = nullptr;
45Mutex* Locks::modify_ldt_lock_ = nullptr;
46ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
47Mutex* Locks::profiler_lock_ = nullptr;
48Mutex* Locks::reference_processor_lock_ = nullptr;
49Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
50Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
51Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
52Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
53Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
54Mutex* Locks::runtime_shutdown_lock_ = nullptr;
55Mutex* Locks::thread_list_lock_ = nullptr;
56Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
57Mutex* Locks::thread_suspend_count_lock_ = nullptr;
58Mutex* Locks::trace_lock_ = nullptr;
59Mutex* Locks::unexpected_signal_lock_ = nullptr;
60
61struct AllMutexData {
62  // A guard for all_mutexes that's not itself a Mutex: acquirers CAS on this word and busy-wait.
63  Atomic<const BaseMutex*> all_mutexes_guard;
64  // All created mutexes guarded by all_mutexes_guard.
65  std::set<BaseMutex*>* all_mutexes;
66  AllMutexData() : all_mutexes(NULL) {}
67};
68static struct AllMutexData gAllMutexData[kAllMutexDataSize];
69
70#if ART_USE_FUTEXES
71static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
72  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
73  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
74  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
75  if (result_ts->tv_nsec < 0) {
76    result_ts->tv_sec--;
77    result_ts->tv_nsec += one_sec;
78  } else if (result_ts->tv_nsec > one_sec) {
79    result_ts->tv_sec++;
80    result_ts->tv_nsec -= one_sec;
81  }
82  return result_ts->tv_sec < 0;
83}
84#endif
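
// Editor's note: a worked example of the subtraction above (result = lhs - rhs, normalized so
// 0 <= tv_nsec < 1s; returns true when lhs is earlier than rhs, i.e. the deadline has passed).
// With lhs = {5, 200000000} (5.2s) and rhs = {3, 900000000} (3.9s), the raw difference is
// {2, -700000000}, which normalizes to {1, 300000000} (1.3s), and the function returns false.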
85
86class ScopedAllMutexesLock {
87 public:
88  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
89    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
90      NanoSleep(100);
91    }
92  }
93  ~ScopedAllMutexesLock() {
94    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
95      NanoSleep(100);
96    }
97  }
98 private:
99  const BaseMutex* const mutex_;
100};
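
// Editor's note: ScopedAllMutexesLock is a tiny CAS spin lock rather than a Mutex, since it must
// work while Mutex objects themselves are being constructed and destroyed. Below is a minimal
// sketch of the same idea using std::atomic; it is illustrative only (kept out of the build) and
// assumes <atomic> plus the NanoSleep() helper from utils.h.
#if 0
static std::atomic<const void*> gGuardWord{nullptr};

static void SpinAcquire(const void* token) {
  const void* expected = nullptr;
  // Busy-wait until we swing the word from nullptr to our token with acquire ordering.
  while (!gGuardWord.compare_exchange_weak(expected, token, std::memory_order_acquire)) {
    expected = nullptr;
    NanoSleep(100);  // Back off briefly, as the real guard does.
  }
}

static void SpinRelease(const void* token) {
  const void* expected = token;
  // Swing the word back to nullptr with release ordering; only the owner can succeed.
  while (!gGuardWord.compare_exchange_weak(expected, nullptr, std::memory_order_release)) {
    expected = token;
  }
}
#endif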
101
102BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
103  if (kLogLockContentions) {
104    ScopedAllMutexesLock mu(this);
105    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
106    if (*all_mutexes_ptr == NULL) {
107      // We leak the global set of all mutexes to avoid ordering issues in global variable
108      // construction/destruction.
109      *all_mutexes_ptr = new std::set<BaseMutex*>();
110    }
111    (*all_mutexes_ptr)->insert(this);
112  }
113}
114
115BaseMutex::~BaseMutex() {
116  if (kLogLockContentions) {
117    ScopedAllMutexesLock mu(this);
118    gAllMutexData->all_mutexes->erase(this);
119  }
120}
121
122void BaseMutex::DumpAll(std::ostream& os) {
123  if (kLogLockContentions) {
124    os << "Mutex logging:\n";
125    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
126    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
127    if (all_mutexes == NULL) {
128      // No mutexes have been created yet at startup.
129      return;
130    }
131    typedef std::set<BaseMutex*>::const_iterator It;
132    os << "(Contended)\n";
133    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
134      BaseMutex* mutex = *it;
135      if (mutex->HasEverContended()) {
136        mutex->Dump(os);
137        os << "\n";
138      }
139    }
140    os << "(Never contented)\n";
141    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
142      BaseMutex* mutex = *it;
143      if (!mutex->HasEverContended()) {
144        mutex->Dump(os);
145        os << "\n";
146      }
147    }
148  }
149}
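
// Editor's note: a hedged usage sketch. When kLogLockContentions is enabled at compile time,
// a caller (for example a runtime abort or SIGQUIT dump path) can emit the statistics collected
// above for every live mutex with something like:
//   std::ostringstream oss;
//   BaseMutex::DumpAll(oss);
//   LOG(INFO) << oss.str();
// The reinterpret_cast<const BaseMutex*>(-1) above is simply a non-null sentinel "owner" for the
// all-mutexes guard, since DumpAll is not called on behalf of any particular mutex.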
150
151void BaseMutex::CheckSafeToWait(Thread* self) {
152  if (self == NULL) {
153    CheckUnattachedThread(level_);
154    return;
155  }
156  if (kDebugLocking) {
157    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
158        << "Waiting on unacquired mutex: " << name_;
159    bool bad_mutexes_held = false;
160    for (int i = kLockLevelCount - 1; i >= 0; --i) {
161      if (i != level_) {
162        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
163        // We expect waits to happen while holding the thread list suspend thread lock.
164        if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
165          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
166                     << "(level " << LockLevel(i) << ") while performing wait on "
167                     << "\"" << name_ << "\" (level " << level_ << ")";
168          bad_mutexes_held = true;
169        }
170      }
171    }
172    CHECK(!bad_mutexes_held);
173  }
174}
175
176void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
177  if (kLogLockContentions) {
178    // Atomically add value to wait_time.
179    wait_time.FetchAndAddSequentiallyConsistent(value);
180  }
181}
182
183void BaseMutex::RecordContention(uint64_t blocked_tid,
184                                 uint64_t owner_tid,
185                                 uint64_t nano_time_blocked) {
186  if (kLogLockContentions) {
187    ContentionLogData* data = contention_log_data_;
188    ++(data->contention_count);
189    data->AddToWaitTime(nano_time_blocked);
190    ContentionLogEntry* log = data->contention_log;
191    // This code is intentionally racy as it is only used for diagnostics.
192    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
193    if (log[slot].blocked_tid == blocked_tid &&
194        log[slot].owner_tid == owner_tid) {
195      ++log[slot].count;
196    } else {
197      uint32_t new_slot;
198      do {
199        slot = data->cur_content_log_entry.LoadRelaxed();
200        new_slot = (slot + 1) % kContentionLogSize;
201      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
202      log[new_slot].blocked_tid = blocked_tid;
203      log[new_slot].owner_tid = owner_tid;
204      log[new_slot].count.StoreRelaxed(1);
205    }
206  }
207}
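
// Editor's note: RecordContention is not called directly by the locking code below; instead the
// blocking paths create a ScopedContentionRecorder (defined in mutex-inl.h) around the futex
// wait, which times the block and reports it, roughly:
//   uint64_t start = NanoTime();
//   ...  // futex(FUTEX_WAIT, ...) until the owner releases the lock
//   RecordContention(blocked_tid, owner_tid, NanoTime() - start);
// The log is a fixed ring of kContentionLogSize entries and is intentionally updated racily.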
208
209void BaseMutex::DumpContention(std::ostream& os) const {
210  if (kLogLockContentions) {
211    const ContentionLogData* data = contention_log_data_;
212    const ContentionLogEntry* log = data->contention_log;
213    uint64_t wait_time = data->wait_time.LoadRelaxed();
214    uint32_t contention_count = data->contention_count.LoadRelaxed();
215    if (contention_count == 0) {
216      os << "never contended";
217    } else {
218      os << "contended " << contention_count
219         << " total wait of contender " << PrettyDuration(wait_time)
220         << " average " << PrettyDuration(wait_time / contention_count);
221      SafeMap<uint64_t, size_t> most_common_blocker;
222      SafeMap<uint64_t, size_t> most_common_blocked;
223      for (size_t i = 0; i < kContentionLogSize; ++i) {
224        uint64_t blocked_tid = log[i].blocked_tid;
225        uint64_t owner_tid = log[i].owner_tid;
226        uint32_t count = log[i].count.LoadRelaxed();
227        if (count > 0) {
228          auto it = most_common_blocked.find(blocked_tid);
229          if (it != most_common_blocked.end()) {
230            most_common_blocked.Overwrite(blocked_tid, it->second + count);
231          } else {
232            most_common_blocked.Put(blocked_tid, count);
233          }
234          it = most_common_blocker.find(owner_tid);
235          if (it != most_common_blocker.end()) {
236            most_common_blocker.Overwrite(owner_tid, it->second + count);
237          } else {
238            most_common_blocker.Put(owner_tid, count);
239          }
240        }
241      }
242      uint64_t max_tid = 0;
243      size_t max_tid_count = 0;
244      for (const auto& pair : most_common_blocked) {
245        if (pair.second > max_tid_count) {
246          max_tid = pair.first;
247          max_tid_count = pair.second;
248        }
249      }
250      if (max_tid != 0) {
251        os << " sample shows most blocked tid=" << max_tid;
252      }
253      max_tid = 0;
254      max_tid_count = 0;
255      for (const auto& pair : most_common_blocker) {
256        if (pair.second > max_tid_count) {
257          max_tid = pair.first;
258          max_tid_count = pair.second;
259        }
260      }
261      if (max_tid != 0) {
262        os << " sample shows tid=" << max_tid << " owning during this time";
263      }
264    }
265  }
266}
267
268
269Mutex::Mutex(const char* name, LockLevel level, bool recursive)
270    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
271#if ART_USE_FUTEXES
272  DCHECK_EQ(0, state_.LoadRelaxed());
273  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
274#else
275  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
276#endif
277  exclusive_owner_ = 0;
278}
279
280Mutex::~Mutex() {
281#if ART_USE_FUTEXES
282  if (state_.LoadRelaxed() != 0) {
283    Runtime* runtime = Runtime::Current();
284    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
285    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
286  } else {
287    CHECK_EQ(exclusive_owner_, 0U)  << "unexpectedly found an owner on unlocked mutex " << name_;
288    CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
289        << "unexpectedly found a contender on mutex " << name_;
290  }
291#else
292  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
293  // may still be using locks.
294  int rc = pthread_mutex_destroy(&mutex_);
295  if (rc != 0) {
296    errno = rc;
297    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
298    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
299    Runtime* runtime = Runtime::Current();
300    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
301    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
302  }
303#endif
304}
305
306void Mutex::ExclusiveLock(Thread* self) {
307  DCHECK(self == NULL || self == Thread::Current());
308  if (kDebugLocking && !recursive_) {
309    AssertNotHeld(self);
310  }
311  if (!recursive_ || !IsExclusiveHeld(self)) {
312#if ART_USE_FUTEXES
313    bool done = false;
314    do {
315      int32_t cur_state = state_.LoadRelaxed();
316      if (LIKELY(cur_state == 0)) {
317        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
318        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
319      } else {
320        // Failed to acquire, hang up.
321        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
322        num_contenders_++;
323        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
324          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
325          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
326          if ((errno != EAGAIN) && (errno != EINTR)) {
327            PLOG(FATAL) << "futex wait failed for " << name_;
328          }
329        }
330        num_contenders_--;
331      }
332    } while (!done);
333    DCHECK_EQ(state_.LoadRelaxed(), 1);
334#else
335    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
336#endif
337    DCHECK_EQ(exclusive_owner_, 0U);
338    exclusive_owner_ = SafeGetTid(self);
339    RegisterAsLocked(self);
340  }
341  recursion_count_++;
342  if (kDebugLocking) {
343    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
344        << name_ << " " << recursion_count_;
345    AssertHeld(self);
346  }
347}
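
// Editor's note: a self-contained sketch (kept out of the build) of the futex acquisition pattern
// used above, written with std::atomic and a raw futex(2) syscall instead of ART's Atomic<> and
// futex() wrappers. Assumed headers: <atomic>, <cerrno>, <cstdint>, <cstdlib>, <linux/futex.h>,
// <sys/syscall.h>, <unistd.h>. State 0 means unlocked and 1 means locked; FUTEX_WAIT only sleeps
// if the word still equals the expected value, which is what makes the CAS race with sleeping safe.
#if 0
static std::atomic<int32_t> gLockWord{0};  // 0 = unlocked, 1 = locked.

static void SketchExclusiveLock() {
  for (;;) {
    int32_t expected = 0;
    // Try to move 0 -> 1 with acquire ordering; success means we own the lock.
    if (gLockWord.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
      return;
    }
    // Failed to acquire: sleep until the word stops being 1. EAGAIN/EINTR are spurious wakeups
    // and simply mean "re-check and maybe wait again", exactly as in the loop above.
    if (syscall(SYS_futex, &gLockWord, FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0 &&
        errno != EAGAIN && errno != EINTR) {
      abort();
    }
  }
}
#endif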
348
349bool Mutex::ExclusiveTryLock(Thread* self) {
350  DCHECK(self == NULL || self == Thread::Current());
351  if (kDebugLocking && !recursive_) {
352    AssertNotHeld(self);
353  }
354  if (!recursive_ || !IsExclusiveHeld(self)) {
355#if ART_USE_FUTEXES
356    bool done = false;
357    do {
358      int32_t cur_state = state_.LoadRelaxed();
359      if (cur_state == 0) {
360        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
361        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
362      } else {
363        return false;
364      }
365    } while (!done);
366    DCHECK_EQ(state_.LoadRelaxed(), 1);
367#else
368    int result = pthread_mutex_trylock(&mutex_);
369    if (result == EBUSY) {
370      return false;
371    }
372    if (result != 0) {
373      errno = result;
374      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
375    }
376#endif
377    DCHECK_EQ(exclusive_owner_, 0U);
378    exclusive_owner_ = SafeGetTid(self);
379    RegisterAsLocked(self);
380  }
381  recursion_count_++;
382  if (kDebugLocking) {
383    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
384        << name_ << " " << recursion_count_;
385    AssertHeld(self);
386  }
387  return true;
388}
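
// Editor's note: a hedged usage sketch of ExclusiveTryLock. When blocking is unacceptable, a
// caller can attempt the lock and fall back to other work if it is already held;
// Locks::trace_lock_ is used purely for illustration here.
//   Thread* self = Thread::Current();
//   if (Locks::trace_lock_->ExclusiveTryLock(self)) {
//     ...  // critical section
//     Locks::trace_lock_->ExclusiveUnlock(self);
//   }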
389
390void Mutex::ExclusiveUnlock(Thread* self) {
391  DCHECK(self == NULL || self == Thread::Current());
392  AssertHeld(self);
393  DCHECK_NE(exclusive_owner_, 0U);
394  recursion_count_--;
395  if (!recursive_ || recursion_count_ == 0) {
396    if (kDebugLocking) {
397      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
398          << name_ << " " << recursion_count_;
399    }
400    RegisterAsUnlocked(self);
401#if ART_USE_FUTEXES
402    bool done = false;
403    do {
404      int32_t cur_state = state_.LoadRelaxed();
405      if (LIKELY(cur_state == 1)) {
406        // We're no longer the owner.
407        exclusive_owner_ = 0;
408        // Change state to 0 and impose load/store ordering appropriate for lock release.
409        // Note: the relaxed loads below mustn't reorder before the CompareExchange.
410        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
411        // a status bit into the state on contention.
412        done =  state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
413        if (LIKELY(done)) {  // Spurious fail?
414          // Wake a contender.
415          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
416            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
417          }
418        }
419      } else {
420        // Logging acquires the logging lock, avoid infinite recursion in that case.
421        if (this != Locks::logging_lock_) {
422          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
423        } else {
424          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
425          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
426                                                 cur_state, name_).c_str());
427          _exit(1);
428        }
429      }
430    } while (!done);
431#else
432    exclusive_owner_ = 0;
433    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
434#endif
435  }
436}
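
// Editor's note: the matching release for the SketchExclusiveLock example after ExclusiveLock
// above (illustrative only, same assumptions). The real code additionally checks num_contenders_
// so the FUTEX_WAKE syscall is skipped on the uncontended fast path.
#if 0
static void SketchExclusiveUnlock() {
  gLockWord.store(0, std::memory_order_release);                       // 1 -> 0: release.
  syscall(SYS_futex, &gLockWord, FUTEX_WAKE, 1, nullptr, nullptr, 0);  // Wake at most one waiter.
}
#endif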
437
438void Mutex::Dump(std::ostream& os) const {
439  os << (recursive_ ? "recursive " : "non-recursive ")
440      << name_
441      << " level=" << static_cast<int>(level_)
442      << " rec=" << recursion_count_
443      << " owner=" << GetExclusiveOwnerTid() << " ";
444  DumpContention(os);
445}
446
447std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
448  mu.Dump(os);
449  return os;
450}
451
452ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
453    : BaseMutex(name, level)
454#if ART_USE_FUTEXES
455    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
456#endif
457{  // NOLINT(whitespace/braces)
458#if !ART_USE_FUTEXES
459  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
460#endif
461  exclusive_owner_ = 0;
462}
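
// Editor's note on the futex encoding used by this class, as inferred from the methods below:
//   state_ ==  0   free
//   state_ == -1   held exclusively by one writer
//   state_  >  0   number of shared (reader) holders
// Pending readers and writers count themselves in num_pending_readers_/num_pending_writers_ and
// sleep on state_ with FUTEX_WAIT; releases issue FUTEX_WAKE on state_ when anyone is pending.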
463
464ReaderWriterMutex::~ReaderWriterMutex() {
465#if ART_USE_FUTEXES
466  CHECK_EQ(state_.LoadRelaxed(), 0);
467  CHECK_EQ(exclusive_owner_, 0U);
468  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
469  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
470#else
471  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
472  // may still be using locks.
473  int rc = pthread_rwlock_destroy(&rwlock_);
474  if (rc != 0) {
475    errno = rc;
476    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
477    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
478    Runtime* runtime = Runtime::Current();
479    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
480    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
481  }
482#endif
483}
484
485void ReaderWriterMutex::ExclusiveLock(Thread* self) {
486  DCHECK(self == NULL || self == Thread::Current());
487  AssertNotExclusiveHeld(self);
488#if ART_USE_FUTEXES
489  bool done = false;
490  do {
491    int32_t cur_state = state_.LoadRelaxed();
492    if (LIKELY(cur_state == 0)) {
493      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
494      done =  state_.CompareExchangeWeakAcquire(0 /* cur_state*/, -1 /* new state */);
495    } else {
496      // Failed to acquire, hang up.
497      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
498      ++num_pending_writers_;
499      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
500        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
501        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
502        if ((errno != EAGAIN) && (errno != EINTR)) {
503          PLOG(FATAL) << "futex wait failed for " << name_;
504        }
505      }
506      --num_pending_writers_;
507    }
508  } while (!done);
509  DCHECK_EQ(state_.LoadRelaxed(), -1);
510#else
511  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
512#endif
513  DCHECK_EQ(exclusive_owner_, 0U);
514  exclusive_owner_ = SafeGetTid(self);
515  RegisterAsLocked(self);
516  AssertExclusiveHeld(self);
517}
518
519void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
520  DCHECK(self == NULL || self == Thread::Current());
521  AssertExclusiveHeld(self);
522  RegisterAsUnlocked(self);
523  DCHECK_NE(exclusive_owner_, 0U);
524#if ART_USE_FUTEXES
525  bool done = false;
526  do {
527    int32_t cur_state = state_.LoadRelaxed();
528    if (LIKELY(cur_state == -1)) {
529      // We're no longer the owner.
530      exclusive_owner_ = 0;
531      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
532      // Note: the relaxed loads below mustn't reorder before the CompareExchange.
533      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
534      // a status bit into the state on contention.
535      done =  state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
536      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
537        // Wake any waiters.
538        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
539                     num_pending_writers_.LoadRelaxed() > 0)) {
540          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
541        }
542      }
543    } else {
544      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
545    }
546  } while (!done);
547#else
548  exclusive_owner_ = 0;
549  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
550#endif
551}
552
553#if HAVE_TIMED_RWLOCK
554bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
555  DCHECK(self == NULL || self == Thread::Current());
556#if ART_USE_FUTEXES
557  bool done = false;
558  timespec end_abs_ts;
559  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
560  do {
561    int32_t cur_state = state_.LoadRelaxed();
562    if (cur_state == 0) {
563      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
564      done =  state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
565    } else {
566      // Failed to acquire, hang up.
567      timespec now_abs_ts;
568      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
569      timespec rel_ts;
570      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
571        return false;  // Timed out.
572      }
573      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
574      ++num_pending_writers_;
575      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
576        if (errno == ETIMEDOUT) {
577          --num_pending_writers_;
578          return false;  // Timed out.
579        } else if ((errno != EAGAIN) && (errno != EINTR)) {
580          // EAGAIN and EINTR both indicate a spurious failure,
581          // recompute the relative time out from now and try again.
582          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts;
583          PLOG(FATAL) << "timed futex wait failed for " << name_;
584        }
585      }
586      --num_pending_writers_;
587    }
588  } while (!done);
589#else
590  timespec ts;
591  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
592  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
593  if (result == ETIMEDOUT) {
594    return false;
595  }
596  if (result != 0) {
597    errno = result;
598    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
599  }
600#endif
601  exclusive_owner_ = SafeGetTid(self);
602  RegisterAsLocked(self);
603  AssertSharedHeld(self);
604  return true;
605}
606#endif
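
// Editor's note: a hedged usage sketch of the timed write lock (only compiled when
// HAVE_TIMED_RWLOCK); the ten-second figure is illustrative, not taken from this file.
//   if (!rw_mutex->ExclusiveLockWithTimeout(self, 10 * 1000 /* ms */, 0 /* ns */)) {
//     ...  // timed out: report the apparent deadlock rather than blocking forever
//   } else {
//     ...
//     rw_mutex->ExclusiveUnlock(self);
//   }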
607
608bool ReaderWriterMutex::SharedTryLock(Thread* self) {
609  DCHECK(self == NULL || self == Thread::Current());
610#if ART_USE_FUTEXES
611  bool done = false;
612  do {
613    int32_t cur_state = state_.LoadRelaxed();
614    if (cur_state >= 0) {
615      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
616      done =  state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
617    } else {
618      // Owner holds it exclusively.
619      return false;
620    }
621  } while (!done);
622#else
623  int result = pthread_rwlock_tryrdlock(&rwlock_);
624  if (result == EBUSY) {
625    return false;
626  }
627  if (result != 0) {
628    errno = result;
629    PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
630  }
631#endif
632  RegisterAsLocked(self);
633  AssertSharedHeld(self);
634  return true;
635}
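
// Editor's note: readers normally take the shared side through the RAII helpers declared in
// mutex.h rather than calling SharedLock/SharedTryLock directly; a hedged sketch:
//   ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
//   ...  // read-only critical section, unlocked automatically on scope exit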
636
637bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
638  DCHECK(self == NULL || self == Thread::Current());
639  bool result;
640  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
641    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
642  } else {
643    result = (self->GetHeldMutex(level_) == this);
644  }
645  return result;
646}
647
648void ReaderWriterMutex::Dump(std::ostream& os) const {
649  os << name_
650      << " level=" << static_cast<int>(level_)
651      << " owner=" << GetExclusiveOwnerTid()
652#if ART_USE_FUTEXES
653      << " state=" << state_.LoadSequentiallyConsistent()
654      << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent()
655      << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent()
656#endif
657      << " ";
658  DumpContention(os);
659}
660
661std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
662  mu.Dump(os);
663  return os;
664}
665
666ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
667    : name_(name), guard_(guard) {
668#if ART_USE_FUTEXES
669  DCHECK_EQ(0, sequence_.LoadRelaxed());
670  num_waiters_ = 0;
671#else
672  pthread_condattr_t cond_attrs;
673  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
674#if !defined(__APPLE__)
675  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
676  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
677#endif
678  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
679#endif
680}
681
682ConditionVariable::~ConditionVariable() {
683#if ART_USE_FUTEXES
684  if (num_waiters_ != 0) {
685    Runtime* runtime = Runtime::Current();
686    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
687    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
688        << " called with " << num_waiters_ << " waiters.";
689  }
690#else
691  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
692  // may still be using condition variables.
693  int rc = pthread_cond_destroy(&cond_);
694  if (rc != 0) {
695    errno = rc;
696    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
697    Runtime* runtime = Runtime::Current();
698    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
699    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
700  }
701#endif
702}
703
704void ConditionVariable::Broadcast(Thread* self) {
705  DCHECK(self == NULL || self == Thread::Current());
706  // TODO: enable below, there's a race in thread creation that causes false failures currently.
707  // guard_.AssertExclusiveHeld(self);
708  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
709#if ART_USE_FUTEXES
710  if (num_waiters_ > 0) {
711    sequence_++;  // Indicate the broadcast occurred.
712    bool done = false;
713    do {
714      int32_t cur_sequence = sequence_.LoadRelaxed();
715      // Requeue waiters onto the guard mutex. Each waiter has already raised the mutex's
716      // contender count (see WaitHoldingLocks), ensuring that unlocks of the mutex will wake
717      // the requeued waiter threads.
717      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
718                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
719                   guard_.state_.Address(), cur_sequence) != -1;
720      if (!done) {
721        if (errno != EAGAIN) {
722          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
723        }
724      }
725    } while (!done);
726  }
727#else
728  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
729#endif
730}
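
// Editor's note: the FUTEX_CMP_REQUEUE above wakes zero threads and instead moves up to INT32_MAX
// waiters from sequence_ onto the guard mutex's state word, but only if sequence_ still equals
// cur_sequence; otherwise the kernel returns EAGAIN and the loop re-reads the sequence. Per
// futex(2) the argument shape is (hedged paraphrase):
//   futex(&sequence_word, FUTEX_CMP_REQUEUE, 0 /* wake */,
//         (const timespec*)INT32_MAX /* max to requeue */, &mutex_state_word, expected_sequence);
// Requeued threads are then woken one at a time by ordinary mutex unlocks, avoiding a thundering
// herd on the guard mutex.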
731
732void ConditionVariable::Signal(Thread* self) {
733  DCHECK(self == NULL || self == Thread::Current());
734  guard_.AssertExclusiveHeld(self);
735#if ART_USE_FUTEXES
736  if (num_waiters_ > 0) {
737    sequence_++;  // Indicate a signal occurred.
738    // Futex-wake one waiter, who will then come in and contend on the mutex. It would be nice
739    // to requeue instead and avoid that contention; however, requeueing can only move all waiters.
740    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
741    // Check that something was woken, or else we changed sequence_ before they had a chance to wait.
742    CHECK((num_woken == 0) || (num_woken == 1));
743  }
744#else
745  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
746#endif
747}
748
749void ConditionVariable::Wait(Thread* self) {
750  guard_.CheckSafeToWait(self);
751  WaitHoldingLocks(self);
752}
753
754void ConditionVariable::WaitHoldingLocks(Thread* self) {
755  DCHECK(self == NULL || self == Thread::Current());
756  guard_.AssertExclusiveHeld(self);
757  unsigned int old_recursion_count = guard_.recursion_count_;
758#if ART_USE_FUTEXES
759  num_waiters_++;
760  // Ensure the Mutex is contended so that requeued threads are awoken.
761  guard_.num_contenders_++;
762  guard_.recursion_count_ = 1;
763  int32_t cur_sequence = sequence_.LoadRelaxed();
764  guard_.ExclusiveUnlock(self);
765  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
766    // Futex failed, check it is an expected error.
767    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
768    // EINTR implies a signal was sent to this thread.
769    if ((errno != EINTR) && (errno != EAGAIN)) {
770      PLOG(FATAL) << "futex wait failed for " << name_;
771    }
772  }
773  guard_.ExclusiveLock(self);
774  CHECK_GE(num_waiters_, 0);
775  num_waiters_--;
776  // We awoke and so no longer require awakes from the guard_'s unlock.
777  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
778  guard_.num_contenders_--;
779#else
780  uint64_t old_owner = guard_.exclusive_owner_;
781  guard_.exclusive_owner_ = 0;
782  guard_.recursion_count_ = 0;
783  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
784  guard_.exclusive_owner_ = old_owner;
785#endif
786  guard_.recursion_count_ = old_recursion_count;
787}
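
// Editor's note: a hedged usage sketch of this condition variable (kept out of the build). As
// with any condition variable, callers hold the guard mutex and re-test their predicate in a
// loop, because both the futex and pthread paths can wake spuriously. The names below are
// hypothetical; kDefaultMutexLevel is assumed to be an appropriate LockLevel for the example.
#if 0
static Mutex gReadyLock("ready lock", kDefaultMutexLevel);
static ConditionVariable gReadyCond("ready condition", gReadyLock);
static bool gReady = false;

static void WaitUntilReady(Thread* self) {
  MutexLock mu(self, gReadyLock);
  while (!gReady) {          // Re-check the predicate after every wakeup.
    gReadyCond.Wait(self);   // Drops gReadyLock while blocked; holds it again on return.
  }
}
#endif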
788
789bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
790  DCHECK(self == NULL || self == Thread::Current());
791  bool timed_out = false;
792  guard_.AssertExclusiveHeld(self);
793  guard_.CheckSafeToWait(self);
794  unsigned int old_recursion_count = guard_.recursion_count_;
795#if ART_USE_FUTEXES
796  timespec rel_ts;
797  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
798  num_waiters_++;
799  // Ensure the Mutex is contended so that requeued threads are awoken.
800  guard_.num_contenders_++;
801  guard_.recursion_count_ = 1;
802  int32_t cur_sequence = sequence_.LoadRelaxed();
803  guard_.ExclusiveUnlock(self);
804  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
805    if (errno == ETIMEDOUT) {
806      // Timed out, we're done.
807      timed_out = true;
808    } else if ((errno == EAGAIN) || (errno == EINTR)) {
809      // A signal or ConditionVariable::Signal/Broadcast has come in.
810    } else {
811      PLOG(FATAL) << "timed futex wait failed for " << name_;
812    }
813  }
814  guard_.ExclusiveLock(self);
815  CHECK_GE(num_waiters_, 0);
816  num_waiters_--;
817  // We awoke and so no longer require awakes from the guard_'s unlock.
818  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
819  guard_.num_contenders_--;
820#else
821#if !defined(__APPLE__)
822  int clock = CLOCK_MONOTONIC;
823#else
824  int clock = CLOCK_REALTIME;
825#endif
826  uint64_t old_owner = guard_.exclusive_owner_;
827  guard_.exclusive_owner_ = 0;
828  guard_.recursion_count_ = 0;
829  timespec ts;
830  InitTimeSpec(true, clock, ms, ns, &ts);
831  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
832  if (rc == ETIMEDOUT) {
833    timed_out = true;
834  } else if (rc != 0) {
835    errno = rc;
836    PLOG(FATAL) << "TimedWait failed for " << name_;
837  }
838  guard_.exclusive_owner_ = old_owner;
839#endif
840  guard_.recursion_count_ = old_recursion_count;
841  return timed_out;
842}
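
// Editor's note: TimedWait takes a relative timeout and returns true on timeout, so a caller that
// needs an overall deadline re-checks both its predicate and the remaining time after every
// wakeup (hedged sketch, names hypothetical):
//   while (!done && !cond->TimedWait(self, remaining_ms, 0)) {
//     ...  // recompute 'done' and 'remaining_ms' under the guard mutex
//   }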
843
844void Locks::Init() {
845  if (logging_lock_ != nullptr) {
846    // Already initialized.
847    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
848      DCHECK(modify_ldt_lock_ != nullptr);
849    } else {
850      DCHECK(modify_ldt_lock_ == nullptr);
851    }
852    DCHECK(abort_lock_ != nullptr);
853    DCHECK(alloc_tracker_lock_ != nullptr);
854    DCHECK(allocated_monitor_ids_lock_ != nullptr);
855    DCHECK(allocated_thread_ids_lock_ != nullptr);
856    DCHECK(breakpoint_lock_ != nullptr);
857    DCHECK(classlinker_classes_lock_ != nullptr);
858    DCHECK(deoptimization_lock_ != nullptr);
859    DCHECK(heap_bitmap_lock_ != nullptr);
860    DCHECK(intern_table_lock_ != nullptr);
861    DCHECK(jni_libraries_lock_ != nullptr);
862    DCHECK(logging_lock_ != nullptr);
863    DCHECK(mutator_lock_ != nullptr);
864    DCHECK(profiler_lock_ != nullptr);
865    DCHECK(thread_list_lock_ != nullptr);
866    DCHECK(thread_list_suspend_thread_lock_ != nullptr);
867    DCHECK(thread_suspend_count_lock_ != nullptr);
868    DCHECK(trace_lock_ != nullptr);
869    DCHECK(unexpected_signal_lock_ != nullptr);
870  } else {
871    // Create global locks in level order from highest lock level to lowest.
872    LockLevel current_lock_level = kThreadListSuspendThreadLock;
873    DCHECK(thread_list_suspend_thread_lock_ == nullptr);
874    thread_list_suspend_thread_lock_ =
875        new Mutex("thread list suspend thread by .. lock", current_lock_level);
876
877    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
878      if (new_level >= current_lock_level) { \
879        /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
880        fprintf(stderr, "New lock level %d is not less than current level %d\n", \
881                new_level, current_lock_level); \
882        exit(1); \
883      } \
884      current_lock_level = new_level;
885
886    UPDATE_CURRENT_LOCK_LEVEL(kInstrumentEntrypointsLock);
887    DCHECK(instrument_entrypoints_lock_ == nullptr);
888    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
889
890    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
891    DCHECK(mutator_lock_ == nullptr);
892    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
893
894    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
895    DCHECK(heap_bitmap_lock_ == nullptr);
896    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
897
898    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
899    DCHECK(trace_lock_ == nullptr);
900    trace_lock_ = new Mutex("trace lock", current_lock_level);
901
902    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
903    DCHECK(runtime_shutdown_lock_ == nullptr);
904    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
905
906    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
907    DCHECK(profiler_lock_ == nullptr);
908    profiler_lock_ = new Mutex("profiler lock", current_lock_level);
909
910    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
911    DCHECK(deoptimization_lock_ == nullptr);
912    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
913
914    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
915    DCHECK(alloc_tracker_lock_ == nullptr);
916    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
917
918    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
919    DCHECK(thread_list_lock_ == nullptr);
920    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
921
922    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
923    DCHECK(jni_libraries_lock_ == nullptr);
924    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
925
926    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
927    DCHECK(breakpoint_lock_ == nullptr);
928    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
929
930    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
931    DCHECK(classlinker_classes_lock_ == nullptr);
932    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
933                                                      current_lock_level);
934
935    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
936    DCHECK(allocated_monitor_ids_lock_ == nullptr);
937    allocated_monitor_ids_lock_ =  new Mutex("allocated monitor ids lock", current_lock_level);
938
939    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
940    DCHECK(allocated_thread_ids_lock_ == nullptr);
941    allocated_thread_ids_lock_ =  new Mutex("allocated thread ids lock", current_lock_level);
942
943    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
944      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
945      DCHECK(modify_ldt_lock_ == nullptr);
946      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
947    }
948
949    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
950    DCHECK(intern_table_lock_ == nullptr);
951    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
952
953    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
954    DCHECK(reference_processor_lock_ == nullptr);
955    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
956
957    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
958    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
959    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
960
961    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
962    DCHECK(reference_queue_weak_references_lock_ == nullptr);
963    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue weak references lock", current_lock_level);
964
965    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
966    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
967    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
968
969    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
970    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
971    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
972
973    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
974    DCHECK(reference_queue_soft_references_lock_ == nullptr);
975    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
976
977    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
978    DCHECK(abort_lock_ == nullptr);
979    abort_lock_ = new Mutex("abort lock", current_lock_level, true);
980
981    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
982    DCHECK(thread_suspend_count_lock_ == nullptr);
983    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
984
985    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
986    DCHECK(unexpected_signal_lock_ == nullptr);
987    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
988
989    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
990    DCHECK(mem_maps_lock_ == nullptr);
991    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);
992
993    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
994    DCHECK(logging_lock_ == nullptr);
995    logging_lock_ = new Mutex("logging lock", current_lock_level, true);
996
997    #undef UPDATE_CURRENT_LOCK_LEVEL
998  }
999}
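
// Editor's note: the descending-level construction above is what lets the lock checks (when
// kDebugLocking is set) verify ordering: roughly, a thread should only acquire a mutex whose
// LockLevel is lower than the levels of the locks it already holds. A correct nesting therefore
// looks like this hedged sketch, while the reverse order would trip the level checks:
//   MutexLock tl(self, *Locks::thread_list_lock_);            // higher level acquired first
//   MutexLock tsc(self, *Locks::thread_suspend_count_lock_);  // then a strictly lower level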
1000
1001
1002}  // namespace art
1003