monitor.cc revision 08fc03ae5dded4adc9b45b7014a4b9dfedbe95a6
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor.h"

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include <vector>

#include "class_linker.h"
#include "dex_instruction.h"
#include "mutex.h"
#include "object.h"
#include "object_utils.h"
#include "scoped_jni_thread_state.h"
#include "scoped_thread_list_lock.h"
#include "stl_util.h"
#include "thread.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"

namespace art {

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked.  Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Android, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998).  Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat".  A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation.  Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock.  The LSB of the
 * lock encodes its state.  When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 *
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several
 * threads waiting on it (the wait call unlocks it).  One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */


/*
 * Monitor accessor.  Extracts a monitor structure pointer from a fat
 * lock.  Performs no error checking.
 */
#define LW_MONITOR(x) \
  (reinterpret_cast<Monitor*>((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field.  Contains a count of the number of times
 * a lock has been recursively acquired.
 */
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
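
/*
 * For illustration only: a minimal sketch (not part of the runtime) of
 * assembling and decoding a thin lock word with the layout described above.
 * LW_SHAPE, LW_SHAPE_THIN, LW_LOCK_OWNER and LW_LOCK_OWNER_SHIFT are assumed
 * to come from monitor.h, as they do elsewhere in this file.
 *
 *   uint32_t thin = 0;                     // unlocked; LSB clear means "thin"
 *   thin |= 42u << LW_LOCK_OWNER_SHIFT;    // owned by the thread with thin lock id 42
 *   thin += 3u << LW_LOCK_COUNT_SHIFT;     // recursively acquired three more times
 *   DCHECK_EQ(LW_SHAPE(thin), LW_SHAPE_THIN);
 *   DCHECK_EQ(LW_LOCK_OWNER(thin), 42);
 *   DCHECK_EQ(LW_LOCK_COUNT(thin), 3u);
 */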

bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
uint32_t Monitor::lock_profiling_threshold_ = 0;

bool Monitor::IsSensitiveThread() {
  if (is_sensitive_thread_hook_ != NULL) {
    return (*is_sensitive_thread_hook_)();
  }
  return false;
}

void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()) {
  lock_profiling_threshold_ = lock_profiling_threshold;
  is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}

Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      locking_method_(NULL),
      locking_dex_pc_(0) {
}

Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
}

/*
 * Links a thread into a monitor's wait set.  The monitor lock must be
 * held by the caller of this routine.
 */
void Monitor::AppendToWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
  if (wait_set_ == NULL) {
    wait_set_ = thread;
    return;
  }

  // push_back.
  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    t = t->wait_next_;
  }
  t->wait_next_ = thread;
}

/*
 * Unlinks a thread from a monitor's wait set.  The monitor lock must
 * be held by the caller of this routine.
 */
void Monitor::RemoveFromWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  if (wait_set_ == NULL) {
    return;
  }
  if (wait_set_ == thread) {
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    return;
  }

  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    if (t->wait_next_ == thread) {
      t->wait_next_ = thread->wait_next_;
      thread->wait_next_ = NULL;
      return;
    }
    t = t->wait_next_;
  }
}

Object* Monitor::GetObject() {
  return obj_;
}

void Monitor::Lock(Thread* self) {
  if (owner_ == self) {
    lock_count_++;
    return;
  }

  if (!lock_.TryLock()) {
    uint64_t waitStart = 0;
    uint64_t waitEnd = 0;
    uint32_t wait_threshold = lock_profiling_threshold_;
    const Method* current_locking_method = NULL;
    uint32_t current_locking_dex_pc = 0;
    {
      ScopedThreadStateChange tsc(self, kBlocked);
      if (wait_threshold != 0) {
        waitStart = NanoTime() / 1000;
      }
      current_locking_method = locking_method_;
      current_locking_dex_pc = locking_dex_pc_;

      lock_.Lock();
      if (wait_threshold != 0) {
        waitEnd = NanoTime() / 1000;
      }
    }

    if (wait_threshold != 0) {
      uint64_t wait_ms = (waitEnd - waitStart) / 1000;
      uint32_t sample_percent;
      if (wait_ms >= wait_threshold) {
        sample_percent = 100;
      } else {
        sample_percent = 100 * wait_ms / wait_threshold;
      }
      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
        const char* current_locking_filename;
        uint32_t current_locking_line_number;
        TranslateLocation(current_locking_method, current_locking_dex_pc,
                          current_locking_filename, current_locking_line_number);
        LogContentionEvent(self, wait_ms, sample_percent, current_locking_filename, current_locking_line_number);
      }
    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
  if (lock_profiling_threshold_ != 0) {
    locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
  }
}

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
                                              __attribute__((format(printf, 1, 2)));

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
  if (!Runtime::Current()->IsStarted()) {
    std::ostringstream ss;
    Thread::Current()->Dump(ss);
    std::string str(ss.str());
    LOG(ERROR) << "IllegalMonitorStateException: " << str;
  }
  va_end(args);
}

static std::string ThreadToString(Thread* thread) {
  if (thread == NULL) {
    return "NULL";
  }
  std::ostringstream oss;
  // TODO: alternatively, we could just return the thread's name.
  oss << *thread;
  return oss.str();
}

void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owner,
                           Monitor* monitor) {
  Thread* current_owner = NULL;
  std::string current_owner_string;
  std::string expected_owner_string;
  std::string found_owner_string;
  {
    // TODO: isn't this too late to prevent threads from disappearing?
    // Acquire thread list lock so threads won't disappear from under us.
    ScopedThreadListLock thread_list_lock;
    // Re-read owner now that we hold lock.
    current_owner = (monitor != NULL) ? monitor->owner_ : NULL;
    // Get short descriptions of the threads involved.
    current_owner_string = ThreadToString(current_owner);
    expected_owner_string = ThreadToString(expected_owner);
    found_owner_string = ThreadToString(found_owner);
  }
  if (current_owner == NULL) {
    if (found_owner == NULL) {
      ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
                                         " on thread '%s'",
                                         PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    } else {
      // Race: the original read found an owner but now there is none
      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                         " (where now the monitor appears unowned) on thread '%s'",
                                         found_owner_string.c_str(),
                                         PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    }
  } else {
    if (found_owner == NULL) {
      // Race: originally there was no owner, there is now
      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                         " (originally believed to be unowned) on thread '%s'",
                                         current_owner_string.c_str(),
                                         PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    } else {
      if (found_owner != current_owner) {
        // Race: originally found and current owner have changed
        ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
                                           " owned by '%s') on object of type '%s' on thread '%s'",
                                           found_owner_string.c_str(),
                                           current_owner_string.c_str(),
                                           PrettyTypeOf(o).c_str(),
                                           expected_owner_string.c_str());
      } else {
        ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                           " on thread '%s'",
                                           current_owner_string.c_str(),
                                           PrettyTypeOf(o).c_str(),
                                           expected_owner_string.c_str());
      }
    }
  }
}

bool Monitor::Unlock(Thread* self) {
  DCHECK(self != NULL);
  Thread* owner = owner_;
  if (owner == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      owner_ = NULL;
      locking_method_ = NULL;
      locking_dex_pc_ = 0;
      lock_.Unlock();
    } else {
      --lock_count_;
    }
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    FailedUnlock(obj_, self, owner, this);
    return false;
  }
  return true;
}

// Converts the given waiting time (relative to "now") into an absolute time in 'ts'.
static void ToAbsoluteTime(int64_t ms, int32_t ns, timespec* ts) {
  int64_t endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
  clock_gettime(CLOCK_MONOTONIC, ts);
#else
  {
    timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000;
  }
#endif
  endSec = ts->tv_sec + ms / 1000;
  if (endSec >= 0x7fffffff) {
    std::ostringstream ss;
    Thread::Current()->Dump(ss);
    LOG(INFO) << "Note: end time exceeds epoch: " << ss.str();
    endSec = 0x7ffffffe;
  }
  ts->tv_sec = endSec;
  ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;

  // Catch rollover.
  if (ts->tv_nsec >= 1000000000L) {
    ts->tv_sec++;
    ts->tv_nsec -= 1000000000L;
  }
}
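
/*
 * For illustration, a hypothetical caller of ToAbsoluteTime, computing the
 * absolute deadline for a 500ms + 250ns wait the way Wait() below does when
 * a timeout was supplied:
 *
 *   timespec deadline;
 *   ToAbsoluteTime(500, 250, &deadline);  // now + 500ms + 250ns, clamped to the 32-bit epoch
 *   // 'deadline' is then suitable for a timed wait such as
 *   // self->wait_cond_->TimedWait(*self->wait_mutex_, deadline).
 */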

/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary.
  timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold.  We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor.  Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prev_lock_count = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const Method* saved_method = locking_method_;
  locking_method_ = NULL;
  uintptr_t saved_dex_pc = locking_dex_pc_;
  locking_dex_pc_ = 0;

  /*
   * Update thread status.  If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(kTimedWaiting);
  } else {
    self->SetState(kWaiting);
  }

  self->wait_mutex_->Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait().
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_->Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur.
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_->Wait(*self->wait_mutex_);
  } else {
    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_->Unlock();

  // Reacquire the monitor lock.
  Lock(self);

 done:
  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prev_lock_count;
  locking_method_ = saved_method;
  locking_dex_pc_ = saved_dex_pc;
  RemoveFromWaitSet(self);

  /* set self->status back to kRunnable, and self-suspend if needed */
  self->SetState(kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
    }
  }
}
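
/*
 * Because of the spurious wakeups permitted above, callers are expected to
 * re-test their condition in a loop, mirroring the canonical Java idiom
 * "while (!cond) obj.wait();".  A sketch in terms of this class, for a
 * hypothetical caller that already owns the monitor:
 *
 *   while (!condition_satisfied) {
 *     monitor->Wait(self, 0, 0, true);  // ms == ns == 0 means wait without timeout
 *     // Re-test: the wakeup may be spurious, or the condition may have been
 *     // consumed by another thread before we reacquired the lock.
 *   }
 */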

void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    MutexLock mu(*thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal();
      return;
    }
  }
}

void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
    return;
  }
  // Signal all threads in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    thread->Notify();
  }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->GetThinLockId()));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  VLOG(monitor) << "monitor: thread " << self->GetThinLockId()
                << " created monitor " << m << " for object " << obj;
  Runtime::Current()->GetMonitorList()->Add(m);
  m->Lock(self);
  // Propagate the lock state.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}
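
/*
 * After inflation the lock word is "fat": the Monitor* occupies bits 31..3,
 * the preserved hash state sits in bits 2..1, and the shape bit (LSB) is set.
 * A sketch of decoding it back, as LW_MONITOR above does:
 *
 *   uint32_t fat = *obj->GetRawLockWordAddress();
 *   DCHECK_EQ(LW_SHAPE(fat), LW_SHAPE_FAT);
 *   Monitor* m = LW_MONITOR(fat);  // masks off the hash state and shape bits
 */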

void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  timespec tm;
  uint32_t sleepDelayNs;
  uint32_t minSleepDelayNs = 1000000;  /* 1 millisecond */
  uint32_t maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  uint32_t threadId = self->GetThinLockId();
 retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock.  The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock.  Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached.  Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      // The lock is unowned. Install the thread id of the calling thread into the owner field.
      // This is the common case: compiled code will have tried this before calling back into
      // the runtime.
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p (a %s) owned by %d",
                                    threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin));
      // The lock is owned by another thread. Notify the runtime that we are about to wait.
      self->monitor_enter_object_ = obj;
      ThreadState oldStatus = self->SetState(kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeeded. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap back to the minimum so we don't poll once a second for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the runtime know we are no longer
          // waiting and try again.
          VLOG(monitor) << StringPrintf("monitor: thread %d found lock %p surprise-fattened by another thread", threadId, thinp);
          self->monitor_enter_object_ = NULL;
          self->SetState(oldStatus);
          goto retry;
        }
      }
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp);
      // We have acquired the thin lock. Let the runtime know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->SetState(oldStatus);
      // Fatten the lock.
      Inflate(self, obj);
      VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    VLOG(monitor) << StringPrintf("monitor: thread %d locking fat lock %p (%p) %p on a %s",
                                  threadId, thinp, LW_MONITOR(*thinp),
                                  reinterpret_cast<void*>(*thinp), PrettyTypeOf(obj).c_str());
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}
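
/*
 * The contended thin-lock path above backs off exponentially: a sched_yield()
 * first, then nanosleep()s of 1ms, 2ms, 4ms, ... capped below one second
 * before wrapping back to 1ms.  A minimal standalone sketch of that schedule
 * (TryAcquire is hypothetical):
 *
 *   uint32_t delay_ns = 0;
 *   while (!TryAcquire()) {
 *     if (delay_ns == 0) {
 *       sched_yield();                   // cheap first attempt: let the owner run
 *       delay_ns = 1000000;              // then start sleeping at 1ms
 *     } else {
 *       timespec tm = { 0, static_cast<long>(delay_ns) };
 *       nanosleep(&tm, NULL);
 *       delay_ns = (delay_ns < 500000000) ? delay_ns * 2 : 1000000;  // double, wrap near 1s
 *     }
 *   }
 */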

bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin.  We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->GetThinLockId()) {
      /*
       * We are the lock owner.  It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case.  Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired.  Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock.  The JVM spec requires that we
       * throw an exception in this case.
       */
      FailedUnlock(obj, self, NULL, NULL);
      return false;
    }
  } else {
    /*
     * The lock is fat.  We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised.  Do not fall through.
      return false;
    }
  }
  return true;
}

/*
 * Object.wait().  Also called for class init.
 */
void Monitor::Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock.  We need to fatten the lock
     * so 'self' can block on it.  Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p by wait()", self->GetThinLockId(), thinp);
  }
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}

void Monitor::Notify(Thread* self, Object* obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
      return;
    }
    // no-op;  there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->Notify(self);
  }
}

void Monitor::NotifyAll(Thread* self, Object* obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
      return;
    }
    // no-op;  there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->NotifyAll(self);
  }
}

uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) {
  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
    return LW_LOCK_OWNER(raw_lock_word);
  } else {
    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
    return owner ? owner->GetThinLockId() : 0;
  }
}

static uint32_t LockOwnerFromThreadLock(Object* thread_lock) {
  ScopedJniThreadState ts(Thread::Current());
  if (thread_lock == NULL ||
      thread_lock->GetClass() != ts.Decode<Class*>(WellKnownClasses::java_lang_ThreadLock)) {
    return ThreadList::kInvalidId;
  }
  Field* thread_field = ts.DecodeField(WellKnownClasses::java_lang_ThreadLock_thread);
  Object* managed_thread = thread_field->GetObject(thread_lock);
  if (managed_thread == NULL) {
    return ThreadList::kInvalidId;
  }
  Field* vmData_field = ts.DecodeField(WellKnownClasses::java_lang_Thread_vmData);
  uintptr_t vmData = static_cast<uintptr_t>(vmData_field->GetInt(managed_thread));
  Thread* thread = reinterpret_cast<Thread*>(vmData);
  if (thread == NULL) {
    return ThreadList::kInvalidId;
  }
  return thread->GetThinLockId();
}

void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  ThreadState state = thread->GetState();

  Object* object = NULL;
  uint32_t lock_owner = ThreadList::kInvalidId;
  if (state == kWaiting || state == kTimedWaiting) {
    os << "  - waiting on ";
    Monitor* monitor = thread->wait_monitor_;
    if (monitor != NULL) {
      object = monitor->obj_;
    }
    lock_owner = LockOwnerFromThreadLock(object);
  } else if (state == kBlocked) {
    os << "  - waiting to lock ";
    object = thread->monitor_enter_object_;
    if (object != NULL) {
      lock_owner = object->GetThinLockId();
    }
  } else {
    // We're not waiting on anything.
    return;
  }

  // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
  // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
  os << "<" << object << "> (a " << PrettyTypeOf(object) << ")";

  if (lock_owner != ThreadList::kInvalidId) {
    os << " held by thread " << lock_owner;
  }

  os << "\n";
}

static void DumpLockedObject(std::ostream& os, Object* o) {
  os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
}

void Monitor::DescribeLocks(std::ostream& os, StackVisitor* stack_visitor) {
  Method* m = stack_visitor->GetMethod();
  CHECK(m != NULL);

  // Native methods are an easy special case.
  // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
  if (m->IsNative()) {
    if (m->IsSynchronized()) {
      Object* jni_this = stack_visitor->GetCurrentSirt()->GetReference(0);
      DumpLockedObject(os, jni_this);
    }
    return;
  }

  // <clinit> is another special case. The runtime holds the class lock while calling <clinit>.
  MethodHelper mh(m);
  if (mh.IsClassInitializer()) {
    DumpLockedObject(os, m->GetDeclaringClass());
    // Fall through because there might be synchronization in the user code too.
  }

  // Is there any reason to believe there's any synchronization in this method?
  const DexFile::CodeItem* code_item = mh.GetCodeItem();
  CHECK(code_item != NULL);
  if (code_item->tries_size_ == 0) {
    return; // No "tries" implies no synchronization, so no held locks to report.
  }

  // Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
  // the locks held in this stack frame.
  std::vector<uint32_t> monitor_enter_dex_pcs;
  verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), monitor_enter_dex_pcs);
  if (monitor_enter_dex_pcs.empty()) {
    return;
  }

  // Verification is an iterative process, so it can visit the same monitor-enter instruction
  // repeatedly with increasingly accurate type information. Our callers don't want to see
  // duplicates.
  STLSortAndRemoveDuplicates(&monitor_enter_dex_pcs);

  for (size_t i = 0; i < monitor_enter_dex_pcs.size(); ++i) {
    // The verifier works in terms of the dex pcs of the monitor-enter instructions.
    // We want the registers used by those instructions (so we can read the values out of them).
    uint32_t dex_pc = monitor_enter_dex_pcs[i];
    uint16_t monitor_enter_instruction = code_item->insns_[dex_pc];

    // Quick sanity check.
    if ((monitor_enter_instruction & 0xff) != Instruction::MONITOR_ENTER) {
      LOG(FATAL) << "expected monitor-enter @" << dex_pc << "; was "
                 << reinterpret_cast<void*>(monitor_enter_instruction);
    }

    uint16_t monitor_register = ((monitor_enter_instruction >> 8) & 0xff);
    Object* o = reinterpret_cast<Object*>(stack_visitor->GetVReg(m, monitor_register));
    DumpLockedObject(os, o);
  }
}
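
/*
 * The decode above relies on monitor-enter being a dex "11x" format
 * instruction: a single 16-bit code unit with the opcode in the low byte and
 * the register vAA in the high byte.  For illustration:
 *
 *   uint16_t insn = code_item->insns_[dex_pc];
 *   uint8_t opcode = insn & 0xff;      // expected to be Instruction::MONITOR_ENTER
 *   uint8_t vAA = (insn >> 8) & 0xff;  // the register holding the locked object
 */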

void Monitor::TranslateLocation(const Method* method, uint32_t dex_pc,
                                const char*& source_file, uint32_t& line_number) const {
  // If method is null, location is unknown.
  if (method == NULL) {
    source_file = "";
    line_number = 0;
    return;
  }
  MethodHelper mh(method);
  source_file = mh.GetDeclaringClassSourceFile();
  if (source_file == NULL) {
    source_file = "";
  }
  line_number = mh.GetLineNumFromDexPC(dex_pc);
}

MonitorList::MonitorList() : lock_("MonitorList lock") {
}

MonitorList::~MonitorList() {
  MutexLock mu(lock_);
  STLDeleteElements(&list_);
}

void MonitorList::Add(Monitor* m) {
  MutexLock mu(lock_);
  list_.push_front(m);
}

void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) {
  MutexLock mu(lock_);
  typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
  It it = list_.begin();
  while (it != list_.end()) {
    Monitor* m = *it;
    if (!is_marked(m->GetObject(), arg)) {
      VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
      delete m;
      it = list_.erase(it);
    } else {
      ++it;
    }
  }
}

}  // namespace art