mutex.cc revision e2facc5b18cd756a8b5500fb3d90da69c9ee0fb7
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "atomic.h"
#include "base/logging.h"
#include "base/time_utils.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
MutatorMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
ConditionVariable* Locks::thread_exit_cond_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::lambda_table_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes guarded by all_mutexes_guard_.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
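// The helper below computes result_ts = lhs - rhs, normalizing tv_nsec into [0, one_sec), and
// returns true when the difference is negative, i.e. the absolute deadline in lhs has already
// passed. Illustrative example (not from the original source): lhs = {5s, 100ns} and
// rhs = {3s, 500ns} yield {1s, 999999600ns} and a return value of false.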
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

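// Scoped guard for gAllMutexData->all_mutexes. It cannot itself be a Mutex because it is used
// while Mutex instances are being constructed and destroyed, so it acts as a tiny spin lock:
// the acquiring BaseMutex publishes its own address with an acquire CAS and competitors retry
// after a short NanoSleep. BaseMutex::DumpAll passes the sentinel address -1, having no mutex
// of its own to use as a token.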
class ScopedAllMutexesLock FINAL {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }

  ~ScopedAllMutexesLock() {
#if !defined(__clang__)
    // TODO: remove this workaround once the targeted GCC/libc++/bionic bug "invalid failure
    // memory model" is fixed.
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakSequentiallyConsistent(mutex_, 0)) {
#else
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
#endif
      NanoSleep(100);
    }
  }

 private:
  const BaseMutex* const mutex_;
};

// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder FINAL : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : nullptr),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATRACE_ENABLED()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATRACE_BEGIN(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    ATRACE_END();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == nullptr) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == nullptr) {
      // No mutexes have been created yet at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
182    os << "(Never contented)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == nullptr) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We expect waits to happen while holding the thread list suspend thread lock.
        if (held_mutex != nullptr) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held);
    }
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

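// Records one contention event in a fixed-size ring buffer of kContentionLogSize entries.
// Consecutive events with the same blocked/owner pair are coalesced by bumping the count of the
// current slot; otherwise the slot index is advanced with a relaxed CAS. The bookkeeping is
// deliberately racy: a lost update only drops a diagnostic sample.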
void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


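// For the futex-based implementation, state_ holds 0 when the mutex is free and 1 when it is
// held; waiters are counted separately in num_contenders_ so the unlock path knows whether a
// FUTEX_WAKE is needed. The pthread implementation simply wraps a pthread_mutex_t.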
Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

// Helper to ignore the lock requirement.
static bool IsShuttingDown() NO_THREAD_SAFETY_ANALYSIS {
  Runtime* runtime = Runtime::Current();
  return runtime == nullptr || runtime->IsShuttingDownLocked();
}

Mutex::~Mutex() {
  bool shutting_down = IsShuttingDown();
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    if (exclusive_owner_ != 0) {
      LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found an owner on unlocked mutex "
                                           << name_;
    }
    if (num_contenders_.LoadSequentiallyConsistent() != 0) {
      LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found a contender on mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

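// Acquisition sketch for the futex path: try to CAS state_ from 0 to 1 with acquire ordering;
// on failure, bump num_contenders_ and FUTEX_WAIT until state_ changes, treating EAGAIN/EINTR
// as spurious wake-ups and retrying. Callers normally go through the scoped helpers rather than
// calling this directly, e.g. (illustrative only):
//   MutexLock mu(self, *Locks::thread_list_lock_);  // locks here, unlocks at end of scope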
void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

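// Release sketch for the futex path: once the recursion count drops to zero, CAS state_ from 1
// back to 0 and, if num_contenders_ suggests someone is blocked, FUTEX_WAKE a single waiter.
// The unexpected-state branch below deliberately avoids LOG(FATAL) for the logging lock itself
// so that failing to unlock the logging lock does not recurse back into logging.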
void Mutex::ExclusiveUnlock(Thread* self) {
  if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
    std::string name1 = "<null>";
    std::string name2 = "<null>";
    if (self != nullptr) {
      self->GetThreadName(name1);
    }
    if (Thread::Current() != nullptr) {
      Thread::Current()->GetThreadName(name2);
    }
    LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1
               << " Thread::Current()=" << name2;
  }
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Spurious fail?
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL,
                              StringPrintf("Unexpected state_ %d in unlock for %s",
                                           cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

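// ReaderWriterMutex state encoding for the futex path: state_ == 0 means free, a positive value
// is the number of shared (reader) holders, and -1 means exclusively held by a writer. Pending
// readers and writers are counted separately so the unlock paths know whether to FUTEX_WAKE.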
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state*/, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure,
          // recompute the relative time out from now and try again.
          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts;
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
#endif

#if ART_USE_FUTEXES
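// Slow path used by the inlined shared-lock fast path (see mutex-inl.h) when a writer currently
// holds the lock: record the contention, bump num_pending_readers_, and FUTEX_WAIT until state_
// moves away from the observed value.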
void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
  // Owner holds it exclusively, hang up.
  ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
  ++num_pending_readers_;
  if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
    if (errno != EAGAIN) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  --num_pending_readers_;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
705    PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid()
#if ART_USE_FUTEXES
      << " state=" << state_.LoadSequentiallyConsistent()
      << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent()
      << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent()
#endif
      << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) {
  mu.Dump(os);
  return os;
}

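// Futex-based condition variable: sequence_ is a counter bumped by Signal/Broadcast, and
// num_waiters_ tracks sleepers. Waiters sample sequence_ while holding the guard mutex, release
// the guard, then FUTEX_WAIT on the sampled value, so a wake-up between unlock and wait is not
// lost. The pthread implementation defers to pthread_cond_t, using CLOCK_MONOTONIC where
// available.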
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

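// Broadcast avoids a thundering herd: rather than waking every waiter, FUTEX_CMP_REQUEUE moves
// them from sequence_ onto the guard mutex's futex word, so each one is woken in turn as the
// guard is unlocked. The cur_sequence argument makes the requeue fail with EAGAIN if another
// signal raced in, in which case the loop retries.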
void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the guard mutex. Each waiter keeps the mutex's contender count
      // raised, ensuring that unlocking the mutex will awaken the requeued waiter thread.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this; however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

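// Wait protocol for the futex path: register as a waiter, pre-increment the guard's contender
// count so that a later requeue from Broadcast gets woken by the guard's unlock, sample
// sequence_, release the guard, then FUTEX_WAIT until sequence_ changes. EINTR/EAGAIN simply
// mean the wait ended early (a signal or Signal/Broadcast); the caller re-checks its condition
// after relocking.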
void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
  bool timed_out = false;
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
      timed_out = true;
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc == ETIMEDOUT) {
    timed_out = true;
  } else if (rc != 0) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
  return timed_out;
}

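// Locks are created in strictly decreasing LockLevel order so that the level hierarchy enforced
// by CheckSafeToWait/RegisterAsLocked is established by construction. UPDATE_CURRENT_LOCK_LEVEL
// asserts the monotonic ordering without using CHECK, because abort_lock_ does not exist yet at
// this point.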
void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(alloc_tracker_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
    DCHECK(jni_libraries_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(lambda_table_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kInstrumentEntrypointsLock;
    DCHECK(instrument_entrypoints_lock_ == nullptr);
    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      if (new_level >= current_lock_level) { \
        /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
960        fprintf(stderr, "New local level %d is not less than current level %d\n", \
                new_level, current_lock_level); \
        exit(1); \
      } \
      current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
    DCHECK(alloc_tracker_lock_ == nullptr);
    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
    DCHECK(jni_libraries_lock_ == nullptr);
    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
    DCHECK(reference_processor_lock_ == nullptr);
    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
    DCHECK(reference_queue_weak_references_lock_ == nullptr);
1039    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
    DCHECK(reference_queue_soft_references_lock_ == nullptr);
    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLambdaTableLock);
    DCHECK(lambda_table_lock_ == nullptr);
    lambda_table_lock_ = new Mutex("lambda table lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL

    InitConditions();
  }
}

void Locks::InitConditions() {
  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
}

}  // namespace art