monitor.cc revision 34e069606d6f1698cd3c33b39e72b79ae27e1c7b
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "monitor.h"
18
19#include <errno.h>
20#include <fcntl.h>
21#include <pthread.h>
22#include <stdlib.h>
23#include <sys/time.h>
24#include <time.h>
25#include <unistd.h>
26
27#include "class_linker.h"
28#include "mutex.h"
29#include "object.h"
30#include "object_utils.h"
31#include "scoped_thread_list_lock.h"
32#include "stl_util.h"
33#include "thread.h"
34#include "thread_list.h"
35
36namespace art {
37
38/*
39 * Every Object has a monitor associated with it, but not every Object is
40 * actually locked.  Even the ones that are locked do not need a
41 * full-fledged monitor until a) there is actual contention or b) wait()
42 * is called on the Object.
43 *
44 * For Android, we have implemented a scheme similar to the one described
45 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
46 * (ACM 1998).  Things are even easier for us, though, because we have
47 * a full 32 bits to work with.
48 *
49 * The two states of an Object's lock are referred to as "thin" and
50 * "fat".  A lock may transition from the "thin" state to the "fat"
51 * state and this transition is referred to as inflation.  Once a lock
52 * has been inflated it remains in the "fat" state indefinitely.
53 *
54 * The lock value itself is stored in Object.lock.  The LSB of the
55 * lock encodes its state.  When cleared, the lock is in the "thin"
56 * state and its bits are formatted as follows:
57 *
58 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
59 *     lock count   thread id  hash state  0
60 *
61 * When set, the lock is in the "fat" state and its bits are formatted
62 * as follows:
63 *
64 *    [31 ---- 3] [2 ---- 1] [0]
65 *      pointer   hash state  1
66 *
67 * For an in-depth description of the mechanics of thin-vs-fat locking,
68 * read the paper referred to above.
69 *
70 * Monitors provide:
71 *  - mutually exclusive access to resources
72 *  - a way for multiple threads to wait for notification
73 *
74 * In effect, they fill the role of both mutexes and condition variables.
75 *
76 * Only one thread can own the monitor at any time.  There may be several
77 * threads waiting on it (the wait call unlocks it).  One or more waiting
78 * threads may be getting interrupted or notified at any given time.
79 *
80 * TODO: the various members of monitor are not SMP-safe.
81 */
82
83
84/*
85 * Monitor accessor.  Extracts a monitor structure pointer from a fat
86 * lock.  Performs no error checking.
87 */
#define LW_MONITOR(x) \
  (reinterpret_cast<Monitor*>((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field.  Contains a count of the number of times
 * a lock has been recursively acquired.
 * Occupies the top bits of a thin lock word (see the layout diagram
 * above): bits [31..19], 13 bits wide, hence the 0x1fff mask.
 */
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
98
// Optional hook used by IsSensitiveThread(); installed via Monitor::Init().
bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
// Contention-wait threshold (ms) above which lock contention is always
// logged; 0 disables lock profiling entirely.  Set via Monitor::Init().
uint32_t Monitor::lock_profiling_threshold_ = 0;
101
102bool Monitor::IsSensitiveThread() {
103  if (is_sensitive_thread_hook_ != NULL) {
104    return (*is_sensitive_thread_hook_)();
105  }
106  return false;
107}
108
// One-time configuration of lock profiling: 'lock_profiling_threshold' is
// the contention-wait threshold in milliseconds (0 disables profiling) and
// 'is_sensitive_thread_hook' optionally identifies sensitive threads.
void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()) {
  lock_profiling_threshold_ = lock_profiling_threshold;
  is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}
113
// Creates an unowned monitor for 'obj'.  The caller is expected to
// acquire it (see Inflate) before publishing it in the object's lock word.
Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      locking_method_(NULL),   // Only tracked when lock profiling is enabled.
      locking_pc_(0) {
}
123
Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  // A monitor is only ever destroyed after inflation, so the object's lock
  // word must still be in the "fat" shape.
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
}
128
129/*
130 * Links a thread into a monitor's wait set.  The monitor lock must be
131 * held by the caller of this routine.
132 */
133void Monitor::AppendToWaitSet(Thread* thread) {
134  DCHECK(owner_ == Thread::Current());
135  DCHECK(thread != NULL);
136  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
137  if (wait_set_ == NULL) {
138    wait_set_ = thread;
139    return;
140  }
141
142  // push_back.
143  Thread* t = wait_set_;
144  while (t->wait_next_ != NULL) {
145    t = t->wait_next_;
146  }
147  t->wait_next_ = thread;
148}
149
150/*
151 * Unlinks a thread from a monitor's wait set.  The monitor lock must
152 * be held by the caller of this routine.
153 */
154void Monitor::RemoveFromWaitSet(Thread *thread) {
155  DCHECK(owner_ == Thread::Current());
156  DCHECK(thread != NULL);
157  if (wait_set_ == NULL) {
158    return;
159  }
160  if (wait_set_ == thread) {
161    wait_set_ = thread->wait_next_;
162    thread->wait_next_ = NULL;
163    return;
164  }
165
166  Thread* t = wait_set_;
167  while (t->wait_next_ != NULL) {
168    if (t->wait_next_ == thread) {
169      t->wait_next_ = thread->wait_next_;
170      thread->wait_next_ = NULL;
171      return;
172    }
173    t = t->wait_next_;
174  }
175}
176
// Returns the object this monitor guards (never NULL after construction).
Object* Monitor::GetObject() {
  return obj_;
}
180
// Acquires the monitor for 'self', blocking if another thread holds it.
// Recursive acquisition by the current owner just bumps lock_count_.
// When lock profiling is enabled (lock_profiling_threshold_ != 0), long
// contended waits are sampled and reported via LogContentionEvent.
void Monitor::Lock(Thread* self) {
  if (owner_ == self) {
    lock_count_++;
    return;
  }

  uint64_t waitStart, waitEnd;  // Microseconds; only written when profiling.
  if (!lock_.TryLock()) {
    // Contended path: we will block on the mutex.
    uint32_t wait_threshold = lock_profiling_threshold_;
    const Method* current_locking_method = NULL;
    uintptr_t current_locking_pc = 0;
    {
      // Report ourselves as blocked while sleeping so the runtime (e.g. GC)
      // knows we are not executing managed code.
      ScopedThreadStateChange tsc(self, kBlocked);
      if (wait_threshold != 0) {
        waitStart = NanoTime() / 1000;
      }
      // Snapshot the current holder's location before blocking; it may be
      // cleared or overwritten by the time we wake up.
      current_locking_method = locking_method_;
      current_locking_pc = locking_pc_;

      lock_.Lock();
      if (wait_threshold != 0) {
        waitEnd = NanoTime() / 1000;
      }
    }

    if (wait_threshold != 0) {
      uint64_t wait_ms = (waitEnd - waitStart) / 1000;
      uint32_t sample_percent;
      if (wait_ms >= wait_threshold) {
        // At or above the threshold: always log.
        sample_percent = 100;
      } else {
        // Below the threshold: sample proportionally to the wait length.
        sample_percent = 100 * wait_ms / wait_threshold;
      }
      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
        const char* current_locking_filename;
        uint32_t current_locking_line_number;
        TranslateLocation(current_locking_method, current_locking_pc,
                          current_locking_filename, current_locking_line_number);
        LogContentionEvent(self, wait_ms, sample_percent, current_locking_filename, current_locking_line_number);
      }
    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
  if (lock_profiling_threshold_ != 0) {
    locking_method_ = self->GetCurrentMethod(&locking_pc_);
  }
}
232
// Raises IllegalMonitorStateException on the current thread with a
// printf-formatted detail message.  Before the runtime is fully started,
// the pending exception would be silently lost, so the thread state is
// additionally dumped to the error log.
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
                                              __attribute__((format(printf, 1, 2)));

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
  if (!Runtime::Current()->IsStarted()) {
    std::ostringstream ss;
    Thread::Current()->Dump(ss);
    std::string str(ss.str());
    LOG(ERROR) << "IllegalMonitorStateException: " << str;
  }
  va_end(args);
}
248
249static std::string ThreadToString(Thread* thread) {
250  if (thread == NULL) {
251    return "NULL";
252  }
253  std::ostringstream oss;
254  // TODO: alternatively, we could just return the thread's name.
255  oss << *thread;
256  return oss.str();
257}
258
259void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owner,
260                           Monitor* monitor) {
261  Thread* current_owner = NULL;
262  std::string current_owner_string;
263  std::string expected_owner_string;
264  std::string found_owner_string;
265  {
266    // TODO: isn't this too late to prevent threads from disappearing?
267    // Acquire thread list lock so threads won't disappear from under us.
268    ScopedThreadListLock thread_list_lock;
269    // Re-read owner now that we hold lock.
270    current_owner = (monitor != NULL) ? monitor->owner_ : NULL;
271    // Get short descriptions of the threads involved.
272    current_owner_string = ThreadToString(current_owner);
273    expected_owner_string = ThreadToString(expected_owner);
274    found_owner_string = ThreadToString(found_owner);
275  }
276  if (current_owner == NULL) {
277    if (found_owner == NULL) {
278      ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
279                                         " on thread '%s'",
280                                         PrettyTypeOf(o).c_str(),
281                                         expected_owner_string.c_str());
282    } else {
283      // Race: the original read found an owner but now there is none
284      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
285                                         " (where now the monitor appears unowned) on thread '%s'",
286                                         found_owner_string.c_str(),
287                                         PrettyTypeOf(o).c_str(),
288                                         expected_owner_string.c_str());
289    }
290  } else {
291    if (found_owner == NULL) {
292      // Race: originally there was no owner, there is now
293      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
294                                         " (originally believed to be unowned) on thread '%s'",
295                                         current_owner_string.c_str(),
296                                         PrettyTypeOf(o).c_str(),
297                                         expected_owner_string.c_str());
298    } else {
299      if (found_owner != current_owner) {
300        // Race: originally found and current owner have changed
301        ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
302                                           " owned by '%s') on object of type '%s' on thread '%s'",
303                                           found_owner_string.c_str(),
304                                           current_owner_string.c_str(),
305                                           PrettyTypeOf(o).c_str(),
306                                           expected_owner_string.c_str());
307      } else {
308        ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
309                                           " on thread '%s",
310                                           current_owner_string.c_str(),
311                                           PrettyTypeOf(o).c_str(),
312                                           expected_owner_string.c_str());
313      }
314    }
315  }
316}
317
// Releases one level of this monitor for 'self'.  Returns true on success;
// returns false (with IllegalMonitorStateException pending) if 'self' is
// not the owner, as the JNI spec requires.
bool Monitor::Unlock(Thread* self) {
  DCHECK(self != NULL);
  Thread* owner = owner_;
  if (owner == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      // Outermost unlock: clear ownership/profiling state, then release the
      // mutex last so the fields are not observed half-cleared.
      owner_ = NULL;
      locking_method_ = NULL;
      locking_pc_ = 0;
      lock_.Unlock();
    } else {
      // Recursive unlock: just pop one level.
      --lock_count_;
    }
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    FailedUnlock(obj_, self, owner, this);
    return false;
  }
  return true;
}
340
341/*
342 * Converts the given relative waiting time into an absolute time.
343 */
344static void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec *ts) {
345  int64_t endSec;
346
347#ifdef HAVE_TIMEDWAIT_MONOTONIC
348  clock_gettime(CLOCK_MONOTONIC, ts);
349#else
350  {
351    struct timeval tv;
352    gettimeofday(&tv, NULL);
353    ts->tv_sec = tv.tv_sec;
354    ts->tv_nsec = tv.tv_usec * 1000;
355  }
356#endif
357  endSec = ts->tv_sec + ms / 1000;
358  if (endSec >= 0x7fffffff) {
359    LOG(INFO) << "Note: end time exceeds epoch";
360    endSec = 0x7ffffffe;
361  }
362  ts->tv_sec = endSec;
363  ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
364
365  // Catch rollover.
366  if (ts->tv_nsec >= 1000000000L) {
367    ts->tv_sec++;
368    ts->tv_nsec -= 1000000000L;
369  }
370}
371
372/*
373 * Wait on a monitor until timeout, interrupt, or notification.  Used for
374 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
375 *
376 * If another thread calls Thread.interrupt(), we throw InterruptedException
377 * and return immediately if one of the following are true:
378 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
379 *  - blocked in join(), join(long), or join(long, int) methods of Thread
380 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
381 * Otherwise, we set the "interrupted" flag.
382 *
383 * Checks to make sure that "ns" is in the range 0-999999
384 * (i.e. fractions of a millisecond) and throws the appropriate
385 * exception if it isn't.
386 *
387 * The spec allows "spurious wakeups", and recommends that all code using
388 * Object.wait() do so in a loop.  This appears to derive from concerns
389 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
390 * on the web casts doubt on whether these can/should occur.
391 *
392 * Since we're allowed to wake up "early", we clamp extremely long durations
393 * to return at the end of the 32-bit time epoch.
394 */
// Implements Object.wait()/wait(long)/wait(long, int) on an inflated
// monitor.  'self' must own the monitor; ms/ns give the timeout (0/0 means
// wait forever); 'interruptShouldThrow' controls whether an interrupt
// raises InterruptedException or merely leaves the interrupted flag set
// (the latter is used for uninterruptible waits such as class init).
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary.
  struct timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold.  We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor.  Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prevLockCount = lock_count_;   // Saved to restore the recursion depth.
  lock_count_ = 0;
  owner_ = NULL;
  const Method* savedMethod = locking_method_;  // Profiling state, restored below.
  locking_method_ = NULL;
  uintptr_t savedPc = locking_pc_;
  locking_pc_ = 0;

  /*
   * Update thread status.  If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(kTimedWaiting);
  } else {
    self->SetState(kWaiting);
  }

  self->wait_mutex_->Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait().
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    // Note: the monitor lock is still held here; the 'done' path restores
    // our ownership state without reacquiring it.
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_->Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur.
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_->Wait(*self->wait_mutex_);
  } else {
    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_->Unlock();

  // Reacquire the monitor lock.
  Lock(self);

done:
  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prevLockCount;
  locking_method_ = savedMethod;
  locking_pc_ = savedPc;
  RemoveFromWaitSet(self);

  /* set self->status back to kRunnable, and self-suspend if needed */
  self->SetState(kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
    }
  }
}
523
// Wakes one thread waiting on this monitor (Object.notify()).  'self' must
// own the monitor or IllegalMonitorStateException is raised.
void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set.  Threads that have
  // already stopped waiting (timed out or were interrupted) are skipped.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    // The MutexLock scope covers the rest of the loop body, so the signal
    // is delivered while holding the waiter's wait_mutex_.
    MutexLock mu(*thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal();
      return;
    }
  }
}
546
547void Monitor::NotifyAll(Thread* self) {
548  DCHECK(self != NULL);
549
550  // Make sure that we hold the lock.
551  if (owner_ != self) {
552    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
553    return;
554  }
555  // Signal all threads in the wait set.
556  while (wait_set_ != NULL) {
557    Thread* thread = wait_set_;
558    wait_set_ = thread->wait_next_;
559    thread->wait_next_ = NULL;
560    thread->Notify();
561  }
562}
563
564/*
565 * Changes the shape of a monitor from thin to fat, preserving the
566 * internal lock state. The calling thread must own the lock.
567 */
/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->GetThinLockId()));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  VLOG(monitor) << "monitor: thread " << self->GetThinLockId()
                << " created monitor " << m << " for object " << obj;
  // Register with the global list so GC can sweep it when obj dies.
  Runtime::Current()->GetMonitorList()->Add(m);
  // Acquire before publishing so no other thread can win the fat lock first.
  m->Lock(self);
  // Propagate the lock state: carry over the recursion count, keep the
  // hash-state bits, and encode the monitor pointer with the fat-shape bit.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}
588
// Acquires the lock on 'obj' for 'self' (the monitor-enter bytecode).
// Handles the thin-lock fast paths (recursive re-entry, uncontended
// acquire) inline, spins with backoff on thin-lock contention (inflating
// afterwards), and delegates to Monitor::Lock for fat locks.
void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  struct timespec tm;
  uint32_t sleepDelayNs;
  uint32_t minSleepDelayNs = 1000000;  /* 1 millisecond */
  uint32_t maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  uint32_t threadId = self->GetThinLockId();
retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock.  The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock.  Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached.  Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      // The lock is unowned. Install the thread id of the calling thread into the owner field.
      // This is the common case: compiled code will have tried this before calling back into
      // the runtime.
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p (a %s) owned by %d",
                                    threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin));
      // The lock is owned by another thread. Notify the runtime that we are about to wait.
      self->monitor_enter_object_ = obj;
      ThreadState oldStatus = self->SetState(kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeed. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            // First contention pass yields the CPU; subsequent passes sleep with
            // exponential backoff, wrapping back to the minimum delay.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap to avoid once a second polls for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the runtime know we are no longer
          // waiting and try again.
          VLOG(monitor) << StringPrintf("monitor: thread %d found lock %p surprise-fattened by another thread", threadId, thinp);
          self->monitor_enter_object_ = NULL;
          self->SetState(oldStatus);
          goto retry;
        }
      }
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp);
      // We have acquired the thin lock. Let the runtime know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->SetState(oldStatus);
      // Fatten the lock.  A lock that has seen contention is likely to see
      // it again, so inflate eagerly after winning the spin.
      Inflate(self, obj);
      VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    VLOG(monitor) << StringPrintf("monitor: thread %d locking fat lock %p (%p) %p on a %s",
                                  threadId, thinp, LW_MONITOR(*thinp),
                                  reinterpret_cast<void*>(*thinp), PrettyTypeOf(obj).c_str());
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}
694
// Releases the lock on 'obj' held by 'self' (the monitor-exit bytecode).
// Returns true on success; returns false with IllegalMonitorStateException
// pending if 'self' does not own the lock.
bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin.  We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->GetThinLockId()) {
      /*
       * We are the lock owner.  It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case.  Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired.  Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock.  The JVM spec requires that we
       * throw an exception in this case.
       */
      FailedUnlock(obj, self, NULL, NULL);
      return false;
    }
  } else {
    /*
     * The lock is fat.  We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised.  Do not fall through.
      return false;
    }
  }
  return true;
}
753
754/*
755 * Object.wait().  Also called for class init.
756 */
/*
 * Object.wait().  Also called for class init.
 * Inflates a thin lock first (wait requires a fat monitor so the thread
 * can be parked on it), then delegates to the instance Wait().
 */
void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock.  We need to fatten the lock
     * so 'self' can block on it.  Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p by wait()", self->GetThinLockId(), thinp);
  }
  // Re-read the (now fat) lock word; Inflate published the monitor pointer.
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}
779
780void Monitor::Notify(Thread* self, Object *obj) {
781  uint32_t thin = *obj->GetRawLockWordAddress();
782
783  // If the lock is still thin, there aren't any waiters;
784  // waiting on an object forces lock fattening.
785  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
786    // Make sure that 'self' holds the lock.
787    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
788      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
789      return;
790    }
791    // no-op;  there are no waiters to notify.
792  } else {
793    // It's a fat lock.
794    LW_MONITOR(thin)->Notify(self);
795  }
796}
797
798void Monitor::NotifyAll(Thread* self, Object *obj) {
799  uint32_t thin = *obj->GetRawLockWordAddress();
800
801  // If the lock is still thin, there aren't any waiters;
802  // waiting on an object forces lock fattening.
803  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
804    // Make sure that 'self' holds the lock.
805    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
806      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
807      return;
808    }
809    // no-op;  there are no waiters to notify.
810  } else {
811    // It's a fat lock.
812    LW_MONITOR(thin)->NotifyAll(self);
813  }
814}
815
816uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) {
817  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
818    return LW_LOCK_OWNER(raw_lock_word);
819  } else {
820    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
821    return owner ? owner->GetThinLockId() : 0;
822  }
823}
824
// Appends a human-readable description of what 'thread' is waiting on (if
// anything) to 'os', e.g. for stack dumps.  Prints nothing for threads
// that are neither waiting nor blocked.
void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  ThreadState state = thread->GetState();

  Object* object = NULL;
  uint32_t lock_owner = ThreadList::kInvalidId;
  if (state == kWaiting || state == kTimedWaiting) {
    // In Object.wait(): report the object whose monitor we are parked on.
    os << "  - waiting on ";
    Monitor* monitor = thread->wait_monitor_;
    if (monitor != NULL) {
      object = monitor->obj_;
    }
    lock_owner = Thread::LockOwnerFromThreadLock(object);
  } else if (state == kBlocked) {
    // In MonitorEnter: report the object we are trying to lock.
    os << "  - waiting to lock ";
    object = thread->monitor_enter_object_;
    if (object != NULL) {
      lock_owner = object->GetThinLockId();
    }
  } else {
    // We're not waiting on anything.
    return;
  }
  os << "<" << object << ">";

  // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
  // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
  os << " (a " << PrettyTypeOf(object) << ")";

  if (lock_owner != ThreadList::kInvalidId) {
    os << " held by thread " << lock_owner;
  }

  os << "\n";
}
859
860void Monitor::TranslateLocation(const Method* method, uint32_t pc,
861                                const char*& source_file, uint32_t& line_number) const {
862  // If method is null, location is unknown
863  if (method == NULL) {
864    source_file = "";
865    line_number = 0;
866    return;
867  }
868  MethodHelper mh(method);
869  source_file = mh.GetDeclaringClassSourceFile();
870  if (source_file == NULL) {
871    source_file = "";
872  }
873  line_number = mh.GetLineNumFromNativePC(pc);
874}
875
// Constructs an empty monitor registry guarded by its own mutex.
MonitorList::MonitorList() : lock_("MonitorList lock") {
}
878
MonitorList::~MonitorList() {
  MutexLock mu(lock_);
  // The registry owns its monitors; free any that were never swept.
  STLDeleteElements(&list_);
}
883
// Registers a newly inflated monitor so SweepMonitorList can reclaim it
// once its object is garbage collected.
void MonitorList::Add(Monitor* m) {
  MutexLock mu(lock_);
  list_.push_front(m);
}
888
889void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) {
890  MutexLock mu(lock_);
891  typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
892  It it = list_.begin();
893  while (it != list_.end()) {
894    Monitor* m = *it;
895    if (!is_marked(m->GetObject(), arg)) {
896      VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
897      delete m;
898      it = list_.erase(it);
899    } else {
900      ++it;
901    }
902  }
903}
904
905}  // namespace art
906