// mutex.cc, revision 2435a43f6c851c23922d8508fb17c6079248201c
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "atomic.h"
#include "base/logging.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::method_verifiers_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
ConditionVariable* Locks::thread_exit_cond_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
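  // Computes result_ts = lhs - rhs, normalizing tv_nsec into [0, 10^9), and returns true when the
  // difference is negative (i.e. the deadline lhs already lies in the past relative to rhs).
  // Illustrative example (not exercised here): lhs = {5s, 100ns}, rhs = {3s, 200ns} gives
  // {2s, -100ns} before normalization and {1s, 999999900ns} after it, so the function returns
  // false because roughly two seconds remain.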
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

class ScopedAllMutexesLock FINAL {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }

  ~ScopedAllMutexesLock() {
#if !defined(__clang__)
    // TODO: remove this workaround once the GCC/libc++/bionic "invalid failure memory model" bug
    // is fixed.
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakSequentiallyConsistent(mutex_, 0)) {
#else
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
#endif
      NanoSleep(100);
    }
  }

 private:
  const BaseMutex* const mutex_;
};
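
// ScopedAllMutexesLock is deliberately a raw CAS spin guard rather than a Mutex: it protects the
// bookkeeping that runs while Mutex objects themselves are being constructed and destroyed, so it
// cannot depend on them. A minimal usage sketch (mirroring BaseMutex::BaseMutex below):
//
//   {
//     ScopedAllMutexesLock mu(this);             // spin until the guard word is ours
//     gAllMutexData->all_mutexes->insert(this);  // mutate the global set
//   }                                            // destructor spins to release the guard word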

// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder FINAL : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : NULL),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATRACE_ENABLED()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATRACE_BEGIN(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    ATRACE_END();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet; we must be at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We expect waits to happen while holding the thread list suspend thread lock.
        if (held_mutex != NULL) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held);
    }
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}

Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

// Helper to ignore the lock requirement.
static bool IsShuttingDown() NO_THREAD_SAFETY_ANALYSIS {
  Runtime* runtime = Runtime::Current();
  return runtime == nullptr || runtime->IsShuttingDownLocked();
}

Mutex::~Mutex() {
  bool shutting_down = IsShuttingDown();
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    if (exclusive_owner_ != 0) {
      LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found an owner on unlocked mutex "
                                           << name_;
    }
    if (num_contenders_.LoadSequentiallyConsistent() != 0) {
      LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found a contender on mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

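// Notes on the futex-based Mutex implementation below (a descriptive summary of the code that
// follows, not a specification): state_ is 0 when unlocked and 1 when held; contenders CAS
// 0 -> 1 to acquire, otherwise bump num_contenders_ and FUTEX_WAIT on state_, while the unlocker
// CASes 1 -> 0 and issues FUTEX_WAKE when num_contenders_ is non-zero. exclusive_owner_ and
// recursion_count_ are only written while the lock is held.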
void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
    std::string name1 = "<null>";
    std::string name2 = "<null>";
    if (self != nullptr) {
      self->GetThreadName(name1);
    }
    if (Thread::Current() != nullptr) {
      Thread::Current()->GetThreadName(name2);
    }
    LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1
               << " Thread::Current()=" << name2;
  }
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Spurious fail?
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL,
                              StringPrintf("Unexpected state_ %d in unlock for %s",
                                           cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}
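
// A usage sketch for the Mutex API above (illustrative only; the names in the body are
// placeholders, and real call sites typically prefer the scoped MutexLock helper used elsewhere
// in this file over explicit lock/unlock pairs):
//
//   Thread* self = Thread::Current();
//   some_lock->ExclusiveLock(self);     // blocks, recording contention if we have to wait
//   /* ... touch the state guarded by some_lock ... */
//   some_lock->ExclusiveUnlock(self);
//
//   // Equivalent scoped form:
//   {
//     MutexLock mu(self, *some_lock);
//   }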

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

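// Notes on the futex-based ReaderWriterMutex state below (summarizing the code that follows):
// state_ is -1 while a writer holds the lock, 0 when free, and otherwise counts the current
// readers. Writers CAS 0 -> -1; readers CAS state_ -> state_ + 1 while it is non-negative.
// Threads that cannot proceed record themselves in num_pending_writers_/num_pending_readers_ and
// FUTEX_WAIT on state_; the unlock paths FUTEX_WAKE them (see ExclusiveUnlock below and the
// inlined shared operations in mutex-inl.h).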
void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure,
          // recompute the relative time out from now and try again.
          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
#endif

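// HandleSharedLockContention below is the out-of-line slow path for shared (reader) acquisition;
// the fast path is presumably the inlined SharedLock in mutex-inl.h (included above), which falls
// back to this helper while a writer holds the lock.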
#if ART_USE_FUTEXES
void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
  // Owner holds it exclusively, hang up.
  ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
  ++num_pending_readers_;
  if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
    if (errno != EAGAIN) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  --num_pending_readers_;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid()
#if ART_USE_FUTEXES
      << " state=" << state_.LoadSequentiallyConsistent()
      << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent()
      << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent()
#endif
      << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}
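
// A usage sketch for ReaderWriterMutex (illustrative only; some_rw_lock is a placeholder, and the
// shared-acquisition fast paths are inlined in mutex-inl.h rather than defined in this file):
//
//   Thread* self = Thread::Current();
//   some_rw_lock->SharedLock(self);        // many readers may hold this concurrently
//   /* ... read the guarded state ... */
//   some_rw_lock->SharedUnlock(self);
//
//   some_rw_lock->ExclusiveLock(self);     // writers exclude readers and other writers
//   /* ... mutate the guarded state ... */
//   some_rw_lock->ExclusiveUnlock(self);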

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

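// Notes on the futex-based ConditionVariable below (summarizing the code that follows): waiters
// snapshot sequence_, release the guard mutex, and FUTEX_WAIT on sequence_ with that snapshot.
// Signal/Broadcast bump sequence_ so a concurrent wait either returns or never blocks; Broadcast
// then uses FUTEX_CMP_REQUEUE to move the remaining waiters onto the guard's futex word, and every
// waiter holds the guard's contender count high so that the guard's unlock path will wake it.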
void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the guard mutex. The waiters hold the guard's contender count high,
      // so that unlocks of the guard mutex will wake the requeued waiter threads.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this, however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  bool timed_out = false;
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
      timed_out = true;
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc == ETIMEDOUT) {
    timed_out = true;
  } else if (rc != 0) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
  return timed_out;
}
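
// A typical waiting pattern for the ConditionVariable API above (illustrative only; cond_var,
// guard_mutex and Ready() are placeholders). Wait() can return spuriously, so callers re-check
// their predicate in a loop:
//
//   Thread* self = Thread::Current();
//   MutexLock mu(self, guard_mutex);
//   while (!Ready()) {
//     cond_var.Wait(self);   // releases guard_mutex while blocked, reacquires before returning
//   }
//   /* ... guard_mutex is held here and the predicate is true ... */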

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(alloc_tracker_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
    DCHECK(jni_libraries_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kInstrumentEntrypointsLock;
    DCHECK(instrument_entrypoints_lock_ == nullptr);
    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      if (new_level >= current_lock_level) { \
        /* Do not use CHECKs or FATAL here, abort_lock_ is not set up yet. */ \
        fprintf(stderr, "New lock level %d is not less than current level %d\n", \
                new_level, current_lock_level); \
        exit(1); \
      } \
      current_lock_level = new_level;

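    // Each UPDATE_CURRENT_LOCK_LEVEL step below moves to a strictly lower lock level, so the
    // creation order of the locks that follow doubles as documentation of the ordering a thread
    // must respect when acquiring several of them (higher levels first, e.g. mutator_lock_
    // before heap_bitmap_lock_).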
    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
    DCHECK(alloc_tracker_lock_ == nullptr);
    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
    DCHECK(jni_libraries_lock_ == nullptr);
    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMethodVerifiersLock);
    DCHECK(method_verifiers_lock_ == nullptr);
    method_verifiers_lock_ = new Mutex("Method verifiers lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
    DCHECK(reference_processor_lock_ == nullptr);
    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
    reference_queue_cleared_references_lock_ =
        new Mutex("ReferenceQueue cleared references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
    DCHECK(reference_queue_weak_references_lock_ == nullptr);
    reference_queue_weak_references_lock_ =
        new Mutex("ReferenceQueue weak references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
    reference_queue_finalizer_references_lock_ =
        new Mutex("ReferenceQueue finalizer references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
    reference_queue_phantom_references_lock_ =
        new Mutex("ReferenceQueue phantom references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
    DCHECK(reference_queue_soft_references_lock_ == nullptr);
    reference_queue_soft_references_lock_ =
        new Mutex("ReferenceQueue soft references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL

    InitConditions();
  }
}

void Locks::InitConditions() {
  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
}

}  // namespace art