monitor.cc revision 50b35e2fd1a68cd1240e4a9d9f363e11764957d1
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "monitor.h"
18
19#include <errno.h>
20#include <fcntl.h>
21#include <pthread.h>
22#include <stdlib.h>
23#include <sys/time.h>
24#include <time.h>
25#include <unistd.h>
26
27#include <vector>
28
29#include "class_linker.h"
30#include "dex_instruction.h"
31#include "mutex.h"
32#include "object.h"
33#include "object_utils.h"
34#include "scoped_thread_state_change.h"
35#include "stl_util.h"
36#include "thread.h"
37#include "thread_list.h"
38#include "verifier/method_verifier.h"
39#include "well_known_classes.h"
40
41namespace art {
42
43/*
44 * Every Object has a monitor associated with it, but not every Object is
45 * actually locked.  Even the ones that are locked do not need a
46 * full-fledged monitor until a) there is actual contention or b) wait()
47 * is called on the Object.
48 *
49 * For Android, we have implemented a scheme similar to the one described
50 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
51 * (ACM 1998).  Things are even easier for us, though, because we have
52 * a full 32 bits to work with.
53 *
54 * The two states of an Object's lock are referred to as "thin" and
55 * "fat".  A lock may transition from the "thin" state to the "fat"
56 * state and this transition is referred to as inflation.  Once a lock
57 * has been inflated it remains in the "fat" state indefinitely.
58 *
59 * The lock value itself is stored in Object.lock.  The LSB of the
60 * lock encodes its state.  When cleared, the lock is in the "thin"
61 * state and its bits are formatted as follows:
62 *
63 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
64 *     lock count   thread id  hash state  0
65 *
66 * When set, the lock is in the "fat" state and its bits are formatted
67 * as follows:
68 *
69 *    [31 ---- 3] [2 ---- 1] [0]
70 *      pointer   hash state  1
71 *
72 * For an in-depth description of the mechanics of thin-vs-fat locking,
73 * read the paper referred to above.
74 *
75 * Monitors provide:
76 *  - mutually exclusive access to resources
77 *  - a way for multiple threads to wait for notification
78 *
79 * In effect, they fill the role of both mutexes and condition variables.
80 *
81 * Only one thread can own the monitor at any time.  There may be several
82 * threads waiting on it (the wait call unlocks it).  One or more waiting
83 * threads may be getting interrupted or notified at any given time.
84 *
85 * TODO: the various members of monitor are not SMP-safe.
86 */
87
88
89/*
90 * Monitor accessor.  Extracts a monitor structure pointer from a fat
91 * lock.  Performs no error checking.
92 */
93#define LW_MONITOR(x) \
94  (reinterpret_cast<Monitor*>((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))
95
96/*
97 * Lock recursion count field.  Contains a count of the number of times
98 * a lock has been recursively acquired.
99 */
100#define LW_LOCK_COUNT_MASK 0x1fff
101#define LW_LOCK_COUNT_SHIFT 19
102#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
103
// Optional hook consulted by IsSensitiveThread(); NULL means "never sensitive".
bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
// Contended-acquisition duration (milliseconds) at or above which a contention
// event is always logged (see Lock()); 0 disables lock profiling entirely.
uint32_t Monitor::lock_profiling_threshold_ = 0;
106
107bool Monitor::IsSensitiveThread() {
108  if (is_sensitive_thread_hook_ != NULL) {
109    return (*is_sensitive_thread_hook_)();
110  }
111  return false;
112}
113
114void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()) {
115  lock_profiling_threshold_ = lock_profiling_threshold;
116  is_sensitive_thread_hook_ = is_sensitive_thread_hook;
117}
118
// Constructs a fat monitor for 'obj', carrying over the thin-lock state
// (recursion count and hash-state bits) held by 'owner', and publishes the
// new fat lock word on the object.
Monitor::Monitor(Thread* owner, Object* obj)
    : monitor_lock_("a monitor lock", kMonitorLock),
      owner_(owner),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      locking_method_(NULL),
      locking_dex_pc_(0) {
  // The creating thread owns the thin lock, so it must own the fat one too.
  monitor_lock_.Lock(owner);
  // Propagate the lock state.
  uint32_t thin = *obj->GetRawLockWordAddress();
  lock_count_ = LW_LOCK_COUNT(thin);  // Preserve the recursion depth.
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;  // Keep only hash-state bits.
  thin |= reinterpret_cast<uint32_t>(this) | LW_SHAPE_FAT;  // Monitor* + fat shape bit.
  // Publish the updated lock word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
  // Lock profiling.
  if (lock_profiling_threshold_ != 0) {
    // Record the acquisition site for later contention reports.
    locking_method_ = owner->GetCurrentMethod(&locking_dex_pc_);
  }
}
140
// Destructor only sanity-checks that the object still carries a fat lock
// word; it does not restore the lock word to the thin shape.
Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
}
145
146/*
147 * Links a thread into a monitor's wait set.  The monitor lock must be
148 * held by the caller of this routine.
149 */
150void Monitor::AppendToWaitSet(Thread* thread) {
151  DCHECK(owner_ == Thread::Current());
152  DCHECK(thread != NULL);
153  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
154  if (wait_set_ == NULL) {
155    wait_set_ = thread;
156    return;
157  }
158
159  // push_back.
160  Thread* t = wait_set_;
161  while (t->wait_next_ != NULL) {
162    t = t->wait_next_;
163  }
164  t->wait_next_ = thread;
165}
166
167/*
168 * Unlinks a thread from a monitor's wait set.  The monitor lock must
169 * be held by the caller of this routine.
170 */
171void Monitor::RemoveFromWaitSet(Thread *thread) {
172  DCHECK(owner_ == Thread::Current());
173  DCHECK(thread != NULL);
174  if (wait_set_ == NULL) {
175    return;
176  }
177  if (wait_set_ == thread) {
178    wait_set_ = thread->wait_next_;
179    thread->wait_next_ = NULL;
180    return;
181  }
182
183  Thread* t = wait_set_;
184  while (t->wait_next_ != NULL) {
185    if (t->wait_next_ == thread) {
186      t->wait_next_ = thread->wait_next_;
187      thread->wait_next_ = NULL;
188      return;
189    }
190    t = t->wait_next_;
191  }
192}
193
// Returns the object whose lock this monitor implements.
Object* Monitor::GetObject() {
  return obj_;
}
197
// Acquires the monitor for 'self', blocking while another thread holds it.
// Recursive acquisition by the current owner just bumps lock_count_. When
// lock profiling is enabled (lock_profiling_threshold_ != 0), contended
// acquisitions are sampled and logged with the holder's acquisition site.
void Monitor::Lock(Thread* self) {
  if (owner_ == self) {
    // Recursive lock; monitor_lock_ is already held by this thread.
    lock_count_++;
    return;
  }

  if (!monitor_lock_.TryLock(self)) {
    // Contended path: block (as kBlocked) until the owner releases the mutex.
    uint64_t waitStart = 0;
    uint64_t waitEnd = 0;
    uint32_t wait_threshold = lock_profiling_threshold_;
    const AbstractMethod* current_locking_method = NULL;
    uint32_t current_locking_dex_pc = 0;
    {
      ScopedThreadStateChange tsc(self, kBlocked);
      if (wait_threshold != 0) {
        waitStart = NanoTime() / 1000;  // Microseconds.
      }
      // Snapshot the holder's acquisition site for the contention log.
      // NOTE(review): read without holding monitor_lock_, so this can race
      // with the owner clearing these fields — confirm this is tolerated.
      current_locking_method = locking_method_;
      current_locking_dex_pc = locking_dex_pc_;

      monitor_lock_.Lock(self);
      if (wait_threshold != 0) {
        waitEnd = NanoTime() / 1000;
      }
    }

    if (wait_threshold != 0) {
      uint64_t wait_ms = (waitEnd - waitStart) / 1000;
      uint32_t sample_percent;
      if (wait_ms >= wait_threshold) {
        // At or above the threshold: always log.
        sample_percent = 100;
      } else {
        // Below the threshold: log with probability proportional to the wait.
        sample_percent = 100 * wait_ms / wait_threshold;
      }
      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
        const char* current_locking_filename;
        uint32_t current_locking_line_number;
        TranslateLocation(current_locking_method, current_locking_dex_pc,
                          current_locking_filename, current_locking_line_number);
        LogContentionEvent(self, wait_ms, sample_percent, current_locking_filename, current_locking_line_number);
      }
    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
  if (lock_profiling_threshold_ != 0) {
    locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
  }
}
250
// Forward declaration carries the printf attribute so format-string mistakes
// are diagnosed at compile time.
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
                                              __attribute__((format(printf, 1, 2)));

// Throws IllegalMonitorStateException on the current thread with a
// printf-formatted message. If the runtime has not fully started, the
// exception may never surface, so also log it with a dump of the thread.
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  va_list args;
  va_start(args, fmt);
  Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
  if (!Runtime::Current()->IsStarted()) {
    std::ostringstream ss;
    Thread::Current()->Dump(ss);
    std::string str(ss.str());
    LOG(ERROR) << "IllegalMonitorStateException: " << str;
  }
  va_end(args);
}
267
268static std::string ThreadToString(Thread* thread) {
269  if (thread == NULL) {
270    return "NULL";
271  }
272  std::ostringstream oss;
273  // TODO: alternatively, we could just return the thread's name.
274  oss << *thread;
275  return oss.str();
276}
277
278void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owner,
279                           Monitor* monitor) {
280  Thread* current_owner = NULL;
281  std::string current_owner_string;
282  std::string expected_owner_string;
283  std::string found_owner_string;
284  {
285    // TODO: isn't this too late to prevent threads from disappearing?
286    // Acquire thread list lock so threads won't disappear from under us.
287    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
288    // Re-read owner now that we hold lock.
289    current_owner = (monitor != NULL) ? monitor->owner_ : NULL;
290    // Get short descriptions of the threads involved.
291    current_owner_string = ThreadToString(current_owner);
292    expected_owner_string = ThreadToString(expected_owner);
293    found_owner_string = ThreadToString(found_owner);
294  }
295  if (current_owner == NULL) {
296    if (found_owner == NULL) {
297      ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
298                                         " on thread '%s'",
299                                         PrettyTypeOf(o).c_str(),
300                                         expected_owner_string.c_str());
301    } else {
302      // Race: the original read found an owner but now there is none
303      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
304                                         " (where now the monitor appears unowned) on thread '%s'",
305                                         found_owner_string.c_str(),
306                                         PrettyTypeOf(o).c_str(),
307                                         expected_owner_string.c_str());
308    }
309  } else {
310    if (found_owner == NULL) {
311      // Race: originally there was no owner, there is now
312      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
313                                         " (originally believed to be unowned) on thread '%s'",
314                                         current_owner_string.c_str(),
315                                         PrettyTypeOf(o).c_str(),
316                                         expected_owner_string.c_str());
317    } else {
318      if (found_owner != current_owner) {
319        // Race: originally found and current owner have changed
320        ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
321                                           " owned by '%s') on object of type '%s' on thread '%s'",
322                                           found_owner_string.c_str(),
323                                           current_owner_string.c_str(),
324                                           PrettyTypeOf(o).c_str(),
325                                           expected_owner_string.c_str());
326      } else {
327        ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
328                                           " on thread '%s",
329                                           current_owner_string.c_str(),
330                                           PrettyTypeOf(o).c_str(),
331                                           expected_owner_string.c_str());
332      }
333    }
334  }
335}
336
// Releases one level of monitor ownership held by 'self'. Returns false
// (with IllegalMonitorStateException pending) if 'self' does not own the
// monitor. 'for_wait' is set when called from WaitWithLock(), which has
// already transferred the ownership fields out of the monitor before
// releasing the mutex.
bool Monitor::Unlock(Thread* self, bool for_wait) {
  DCHECK(self != NULL);
  Thread* owner = owner_;
  if (owner == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      // Outermost unlock: clear ownership/profiling state and release.
      owner_ = NULL;
      locking_method_ = NULL;
      locking_dex_pc_ = 0;
      monitor_lock_.Unlock(self);
    } else {
      // Recursive unlock: just pop one level.
      --lock_count_;
    }
  } else if (for_wait) {
    // Wait should have already cleared the fields.
    DCHECK_EQ(lock_count_, 0);
    DCHECK(owner == NULL);
    DCHECK(locking_method_ == NULL);
    DCHECK_EQ(locking_dex_pc_, 0u);
    monitor_lock_.Unlock(self);
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    FailedUnlock(obj_, self, owner, this);
    return false;
  }
  return true;
}
366
367// Converts the given waiting time (relative to "now") into an absolute time in 'ts'.
368static void ToAbsoluteTime(int64_t ms, int32_t ns, timespec* ts)
369    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
370  int64_t endSec;
371
372#ifdef HAVE_TIMEDWAIT_MONOTONIC
373  clock_gettime(CLOCK_MONOTONIC, ts);
374#else
375  {
376    timeval tv;
377    gettimeofday(&tv, NULL);
378    ts->tv_sec = tv.tv_sec;
379    ts->tv_nsec = tv.tv_usec * 1000;
380  }
381#endif
382  endSec = ts->tv_sec + ms / 1000;
383  if (endSec >= 0x7fffffff) {
384    std::ostringstream ss;
385    Thread::Current()->Dump(ss);
386    LOG(INFO) << "Note: end time exceeds epoch: " << ss.str();
387    endSec = 0x7ffffffe;
388  }
389  ts->tv_sec = endSec;
390  ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
391
392  // Catch rollover.
393  if (ts->tv_nsec >= 1000000000L) {
394    ts->tv_sec++;
395    ts->tv_nsec -= 1000000000L;
396  }
397}
398
399/*
400 * Wait on a monitor until timeout, interrupt, or notification.  Used for
401 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
402 *
403 * If another thread calls Thread.interrupt(), we throw InterruptedException
404 * and return immediately if one of the following are true:
405 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
406 *  - blocked in join(), join(long), or join(long, int) methods of Thread
407 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
408 * Otherwise, we set the "interrupted" flag.
409 *
410 * Checks to make sure that "ns" is in the range 0-999999
411 * (i.e. fractions of a millisecond) and throws the appropriate
412 * exception if it isn't.
413 *
414 * The spec allows "spurious wakeups", and recommends that all code using
415 * Object.wait() do so in a loop.  This appears to derive from concerns
416 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
417 * on the web casts doubt on whether these can/should occur.
418 *
419 * Since we're allowed to wake up "early", we clamp extremely long durations
420 * to return at the end of the 32-bit time epoch.
421 */
422void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
423  DCHECK(self != NULL);
424
425  // Make sure that we hold the lock.
426  if (owner_ != self) {
427    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
428    return;
429  }
430  monitor_lock_.AssertHeld(self);
431  WaitWithLock(self, ms, ns, interruptShouldThrow);
432}
433
434void Monitor::WaitWithLock(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
435  // Enforce the timeout range.
436  if (ms < 0 || ns < 0 || ns > 999999) {
437    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
438        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
439    return;
440  }
441
442  // Compute absolute wakeup time, if necessary.
443  timespec ts;
444  bool timed = false;
445  if (ms != 0 || ns != 0) {
446    ToAbsoluteTime(ms, ns, &ts);
447    timed = true;
448  }
449
450  /*
451   * Add ourselves to the set of threads waiting on this monitor, and
452   * release our hold.  We need to let it go even if we're a few levels
453   * deep in a recursive lock, and we need to restore that later.
454   *
455   * We append to the wait set ahead of clearing the count and owner
456   * fields so the subroutine can check that the calling thread owns
457   * the monitor.  Aside from that, the order of member updates is
458   * not order sensitive as we hold the pthread mutex.
459   */
460  AppendToWaitSet(self);
461  int prev_lock_count = lock_count_;
462  lock_count_ = 0;
463  owner_ = NULL;
464  const AbstractMethod* saved_method = locking_method_;
465  locking_method_ = NULL;
466  uintptr_t saved_dex_pc = locking_dex_pc_;
467  locking_dex_pc_ = 0;
468
469  /*
470   * Update thread status.  If the GC wakes up, it'll ignore us, knowing
471   * that we won't touch any references in this state, and we'll check
472   * our suspend mode before we transition out.
473   */
474  self->TransitionFromRunnableToSuspended(timed ? kTimedWaiting : kWaiting);
475
476  bool wasInterrupted = false;
477  {
478    // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
479    MutexLock mu(self, *self->wait_mutex_);
480
481    // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
482    // non-NULL a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
483    // up.
484    DCHECK(self->wait_monitor_ == NULL);
485    self->wait_monitor_ = this;
486
487    // Release the monitor lock.
488    Unlock(self, true);
489
490    /*
491     * Handle the case where the thread was interrupted before we called
492     * wait().
493     */
494    if (self->interrupted_) {
495      wasInterrupted = true;
496    } else {
497      // Wait for a notification or a timeout to occur.
498      if (!timed) {
499        self->wait_cond_->Wait(self, *self->wait_mutex_);
500      } else {
501        self->wait_cond_->TimedWait(self, *self->wait_mutex_, ts);
502      }
503      if (self->interrupted_) {
504        wasInterrupted = true;
505      }
506      self->interrupted_ = false;
507    }
508    self->wait_monitor_ = NULL;
509  }
510
511  // Set self->status back to kRunnable, and self-suspend if needed.
512  self->TransitionFromSuspendedToRunnable();
513
514  // Re-acquire the monitor lock.
515  Lock(self);
516
517
518  self->wait_mutex_->AssertNotHeld(self);
519
520  /*
521   * We remove our thread from wait set after restoring the count
522   * and owner fields so the subroutine can check that the calling
523   * thread owns the monitor. Aside from that, the order of member
524   * updates is not order sensitive as we hold the pthread mutex.
525   */
526  owner_ = self;
527  lock_count_ = prev_lock_count;
528  locking_method_ = saved_method;
529  locking_dex_pc_ = saved_dex_pc;
530  RemoveFromWaitSet(self);
531
532  if (wasInterrupted) {
533    /*
534     * We were interrupted while waiting, or somebody interrupted an
535     * un-interruptible thread earlier and we're bailing out immediately.
536     *
537     * The doc sayeth: "The interrupted status of the current thread is
538     * cleared when this exception is thrown."
539     */
540    {
541      MutexLock mu(self, *self->wait_mutex_);
542      self->interrupted_ = false;
543    }
544    if (interruptShouldThrow) {
545      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
546    }
547  }
548}
549
// Object.notify() on a fat monitor: verifies that 'self' owns the monitor,
// then wakes the first thread still waiting in the wait set.
void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);
  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
    return;
  }
  monitor_lock_.AssertHeld(self);
  NotifyWithLock(self);
}
560
// Wakes the first thread in the wait set that is still actually waiting
// (wait_monitor_ != NULL, checked under the thread's wait_mutex_). Threads
// that have already stopped waiting are simply dropped from the set. The
// monitor lock must be held by the caller.
void Monitor::NotifyWithLock(Thread* self) {
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    MutexLock mu(self, *thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal();
      return;
    }
  }
}
576
// Object.notifyAll() on a fat monitor: verifies that 'self' owns the
// monitor, then wakes every thread in the wait set.
void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != NULL);
  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
    return;
  }
  monitor_lock_.AssertHeld(self);
  NotifyAllWithLock();
}
587
588void Monitor::NotifyAllWithLock() {
589  // Signal all threads in the wait set.
590  while (wait_set_ != NULL) {
591    Thread* thread = wait_set_;
592    wait_set_ = thread->wait_next_;
593    thread->wait_next_ = NULL;
594    thread->Notify();
595  }
596}
597
598/*
599 * Changes the shape of a monitor from thin to fat, preserving the
600 * internal lock state. The calling thread must own the lock.
601 */
602void Monitor::Inflate(Thread* self, Object* obj) {
603  DCHECK(self != NULL);
604  DCHECK(obj != NULL);
605  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
606  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->GetThinLockId()));
607
608  // Allocate and acquire a new monitor.
609  Monitor* m = new Monitor(self, obj);
610  VLOG(monitor) << "monitor: thread " << self->GetThinLockId()
611                << " created monitor " << m << " for object " << obj;
612  Runtime::Current()->GetMonitorList()->Add(m);
613}
614
/*
 * Implements monitor-enter semantics. Three thin-lock cases are handled:
 * recursive acquisition by the owner (bump the count, inflating at the
 * recursion limit), an unowned lock (CAS our thread id into the owner
 * field), and a lock owned by another thread (spin/sleep until the lock is
 * released or inflated, then inflate it ourselves). Fat locks delegate to
 * Monitor::Lock().
 */
void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  timespec tm;
  uint32_t sleepDelayNs;
  uint32_t minSleepDelayNs = 1000000;  /* 1 millisecond */
  uint32_t maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  uint32_t threadId = self->GetThinLockId();
 retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock.  The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock.  Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached.  Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      // The lock is unowned. Install the thread id of the calling thread into the owner field.
      // This is the common case: compiled code will have tried this before calling back into
      // the runtime.
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p (a %s) owned by %d",
                                    threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin));
      // The lock is owned by another thread. Notify the runtime that we are about to wait.
      self->monitor_enter_object_ = obj;
      self->TransitionFromRunnableToSuspended(kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeed. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap to avoid once a second polls for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the runtime know we are no longer
          // waiting and try again.
          VLOG(monitor) << StringPrintf("monitor: thread %d found lock %p surprise-fattened by another thread", threadId, thinp);
          self->monitor_enter_object_ = NULL;
          self->TransitionFromSuspendedToRunnable();
          goto retry;
        }
      }
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp);
      // We have acquired the thin lock. Let the runtime know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->TransitionFromSuspendedToRunnable();
      // Fatten the lock.
      Inflate(self, obj);
      VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    VLOG(monitor) << StringPrintf("monitor: thread %d locking fat lock %p (%p) %p on a %s",
                                  threadId, thinp, LW_MONITOR(*thinp),
                                  reinterpret_cast<void*>(*thinp), PrettyTypeOf(obj).c_str());
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}
720
/*
 * Implements monitor-exit semantics. For a thin lock owned by 'self',
 * either clears the lock word (preserving the hash-state bits) or
 * decrements the recursion count; for a fat lock, delegates to
 * Monitor::Unlock(). Returns false (with IllegalMonitorStateException
 * pending) if 'self' does not own the lock.
 */
bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin.  We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->GetThinLockId()) {
      /*
       * We are the lock owner.  It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case.  Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired.  Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock.  The JVM spec requires that we
       * throw an exception in this case.
       */
      FailedUnlock(obj, self, NULL, NULL);
      return false;
    }
  } else {
    /*
     * The lock is fat.  We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self, false)) {
      // An exception has been raised.  Do not fall through.
      return false;
    }
  }
  return true;
}
779
780/*
781 * Object.wait().  Also called for class init.
782 */
783void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
784  volatile int32_t* thinp = obj->GetRawLockWordAddress();
785
786  // If the lock is still thin, we need to fatten it.
787  uint32_t thin = *thinp;
788  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
789    // Make sure that 'self' holds the lock.
790    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
791      ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
792      return;
793    }
794
795    /* This thread holds the lock.  We need to fatten the lock
796     * so 'self' can block on it.  Don't update the object lock
797     * field yet, because 'self' needs to acquire the lock before
798     * any other thread gets a chance.
799     */
800    Inflate(self, obj);
801    VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p by wait()", self->GetThinLockId(), thinp);
802  }
803  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
804}
805
// Object.notify() entry point; 'self' must hold obj's lock (thin or fat).
void Monitor::Notify(Thread* self, Object *obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
      return;
    }
    // no-op;  there are no waiters to notify.
    // NOTE(review): despite the comment above, the lock IS inflated here —
    // presumably deliberate (e.g. to help waiters when notify is called in a
    // tight loop); confirm the intent before treating this as dead code.
    Inflate(self, obj);
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->Notify(self);
  }
}
824
// Object.notifyAll() entry point; 'self' must hold obj's lock (thin or fat).
void Monitor::NotifyAll(Thread* self, Object *obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
      return;
    }
    // no-op;  there are no waiters to notify.
    // NOTE(review): as in Notify() above, the lock is nevertheless inflated
    // here; confirm the intent before treating this as dead code.
    Inflate(self, obj);
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->NotifyAll(self);
  }
}
843
844uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) {
845  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
846    return LW_LOCK_OWNER(raw_lock_word);
847  } else {
848    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
849    return owner ? owner->GetThinLockId() : 0;
850  }
851}
852
// If 'thread_lock' is a java.lang.ThreadLock, returns the thin lock id of
// the native thread backing its 'thread' field; otherwise (null, wrong
// class, unattached thread) returns ThreadList::kInvalidId.
static uint32_t LockOwnerFromThreadLock(Object* thread_lock) {
  ScopedObjectAccess soa(Thread::Current());
  if (thread_lock == NULL ||
      thread_lock->GetClass() != soa.Decode<Class*>(WellKnownClasses::java_lang_ThreadLock)) {
    return ThreadList::kInvalidId;
  }
  // ThreadLock.thread is the managed java.lang.Thread the lock belongs to.
  Field* thread_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadLock_thread);
  Object* managed_thread = thread_field->GetObject(thread_lock);
  if (managed_thread == NULL) {
    return ThreadList::kInvalidId;
  }
  // Thread.vmData stores the native Thread* for the managed thread.
  Field* vmData_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData);
  uintptr_t vmData = static_cast<uintptr_t>(vmData_field->GetInt(managed_thread));
  Thread* thread = reinterpret_cast<Thread*>(vmData);
  if (thread == NULL) {
    return ThreadList::kInvalidId;
  }
  return thread->GetThinLockId();
}
872
// Append a single "  - waiting on ..." / "  - waiting to lock ..." line to
// 'os' describing what, if anything, 'thread' is blocked on. Emits nothing
// when the thread is neither waiting nor blocked.
void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  ThreadState state;
  state = thread->GetState();

  Object* object = NULL;
  uint32_t lock_owner = ThreadList::kInvalidId;
  if (state == kWaiting || state == kTimedWaiting) {
    // Object.wait()/wait(timeout): the monitor being waited on is published
    // via thread->wait_monitor_, which is guarded by thread->wait_mutex_.
    os << "  - waiting on ";
    Monitor* monitor;
    {
      // Hold wait_mutex_ only long enough to copy the pointer.
      MutexLock mu(Thread::Current(), *thread->wait_mutex_);
      monitor = thread->wait_monitor_;
    }
    if (monitor != NULL) {
      object = monitor->obj_;
    }
    // Only resolves an owner if 'object' is a java.lang.ThreadLock; for any
    // other waited-on object this yields kInvalidId and no "held by" suffix.
    lock_owner = LockOwnerFromThreadLock(object);
  } else if (state == kBlocked) {
    // Contended monitor-enter: the object being acquired is recorded on the
    // thread while it blocks.
    os << "  - waiting to lock ";
    object = thread->monitor_enter_object_;
    if (object != NULL) {
      lock_owner = object->GetThinLockId();
    }
  } else {
    // We're not waiting on anything.
    return;
  }

  // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
  // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
  os << "<" << object << "> (a " << PrettyTypeOf(object) << ")";

  if (lock_owner != ThreadList::kInvalidId) {
    os << " held by thread " << lock_owner;
  }

  os << "\n";
}
911
912static void DumpLockedObject(std::ostream& os, Object* o)
913    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
914  os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
915}
916
// Append "  - locked <...>" lines to 'os' for each monitor held by the stack
// frame currently visited by 'stack_visitor'.
void Monitor::DescribeLocks(std::ostream& os, StackVisitor* stack_visitor) {
  AbstractMethod* m = stack_visitor->GetMethod();
  CHECK(m != NULL);

  // Native methods are an easy special case.
  // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
  if (m->IsNative()) {
    // A synchronized native method implicitly locks 'this' (or the class for
    // statics); that reference sits in slot 0 of the current SIRT.
    if (m->IsSynchronized()) {
      Object* jni_this = stack_visitor->GetCurrentSirt()->GetReference(0);
      DumpLockedObject(os, jni_this);
    }
    return;
  }

  // <clinit> is another special case. The runtime holds the class lock while calling <clinit>.
  MethodHelper mh(m);
  if (mh.IsClassInitializer()) {
    DumpLockedObject(os, m->GetDeclaringClass());
    // Fall through because there might be synchronization in the user code too.
  }

  // Is there any reason to believe there's any synchronization in this method?
  const DexFile::CodeItem* code_item = mh.GetCodeItem();
  CHECK(code_item != NULL) << PrettyMethod(m);
  if (code_item->tries_size_ == 0) {
    return; // No "tries" implies no synchronization, so no held locks to report.
  }

  // TODO: Enable dex register lock descriptions, disabling as for the portable path GetVReg is
  // unimplemented. There is also a possible deadlock relating to the verifier calling
  // ClassLoader.loadClass and reentering managed code whilst the ThreadList lock is held.
  // NOTE(review): everything below is currently dead code behind this flag.
  const bool kEnableDexRegisterLockDescriptions = false;
  if (kEnableDexRegisterLockDescriptions) {
    // Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
    // the locks held in this stack frame.
    std::vector<uint32_t> monitor_enter_dex_pcs;
    verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), monitor_enter_dex_pcs);
    if (monitor_enter_dex_pcs.empty()) {
      return;
    }

    // Verification is an iterative process, so it can visit the same monitor-enter instruction
    // repeatedly with increasingly accurate type information. Our callers don't want to see
    // duplicates.
    STLSortAndRemoveDuplicates(&monitor_enter_dex_pcs);

    for (size_t i = 0; i < monitor_enter_dex_pcs.size(); ++i) {
      // The verifier works in terms of the dex pcs of the monitor-enter instructions.
      // We want the registers used by those instructions (so we can read the values out of them).
      uint32_t dex_pc = monitor_enter_dex_pcs[i];
      uint16_t monitor_enter_instruction = code_item->insns_[dex_pc];

      // Quick sanity check.
      if ((monitor_enter_instruction & 0xff) != Instruction::MONITOR_ENTER) {
        LOG(FATAL) << "expected monitor-enter @" << dex_pc << "; was "
            << reinterpret_cast<void*>(monitor_enter_instruction);
      }

      // monitor-enter is format 11x: vAA lives in the instruction's high byte.
      uint16_t monitor_register = ((monitor_enter_instruction >> 8) & 0xff);
      Object* o = reinterpret_cast<Object*>(stack_visitor->GetVReg(m, monitor_register));
      DumpLockedObject(os, o);
    }
  }
}
981
982void Monitor::TranslateLocation(const AbstractMethod* method, uint32_t dex_pc,
983                                const char*& source_file, uint32_t& line_number) const {
984  // If method is null, location is unknown
985  if (method == NULL) {
986    source_file = "";
987    line_number = 0;
988    return;
989  }
990  MethodHelper mh(method);
991  source_file = mh.GetDeclaringClassSourceFile();
992  if (source_file == NULL) {
993    source_file = "";
994  }
995  line_number = mh.GetLineNumFromDexPC(dex_pc);
996}
997
// Constructs an empty monitor list; only the guarding mutex needs naming.
MonitorList::MonitorList() : monitor_list_lock_("MonitorList lock") {
}
1000
// Frees every remaining monitor. Takes the list lock for consistency, though
// no other user should exist at destruction time.
MonitorList::~MonitorList() {
  MutexLock mu(Thread::Current(), monitor_list_lock_);
  STLDeleteElements(&list_);
}
1005
// Registers a newly inflated monitor so SweepMonitorList can reclaim it when
// its object dies. Takes ownership tracking only; 'm' is not copied.
void MonitorList::Add(Monitor* m) {
  MutexLock mu(Thread::Current(), monitor_list_lock_);
  list_.push_front(m);
}
1010
1011void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) {
1012  MutexLock mu(Thread::Current(), monitor_list_lock_);
1013  typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
1014  It it = list_.begin();
1015  while (it != list_.end()) {
1016    Monitor* m = *it;
1017    if (!is_marked(m->GetObject(), arg)) {
1018      VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
1019      delete m;
1020      it = list_.erase(it);
1021    } else {
1022      ++it;
1023    }
1024  }
1025}
1026
1027}  // namespace art
1028