monitor.cc revision 80537bb742dff4ccdf6d04b1c0bb7d2179acc8cb
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "monitor.h"
18
19#include <vector>
20
21#include "base/mutex.h"
22#include "base/stl_util.h"
23#include "class_linker.h"
24#include "dex_instruction.h"
25#include "object.h"
26#include "object_utils.h"
27#include "scoped_thread_state_change.h"
28#include "thread.h"
29#include "thread_list.h"
30#include "verifier/method_verifier.h"
31#include "well_known_classes.h"
32
33namespace art {
34
35/*
36 * Every Object has a monitor associated with it, but not every Object is
37 * actually locked.  Even the ones that are locked do not need a
38 * full-fledged monitor until a) there is actual contention or b) wait()
39 * is called on the Object.
40 *
41 * For Android, we have implemented a scheme similar to the one described
42 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
43 * (ACM 1998).  Things are even easier for us, though, because we have
44 * a full 32 bits to work with.
45 *
46 * The two states of an Object's lock are referred to as "thin" and
47 * "fat".  A lock may transition from the "thin" state to the "fat"
48 * state and this transition is referred to as inflation.  Once a lock
49 * has been inflated it remains in the "fat" state indefinitely.
50 *
51 * The lock value itself is stored in Object.lock.  The LSB of the
52 * lock encodes its state.  When cleared, the lock is in the "thin"
53 * state and its bits are formatted as follows:
54 *
55 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
56 *     lock count   thread id  hash state  0
57 *
58 * When set, the lock is in the "fat" state and its bits are formatted
59 * as follows:
60 *
61 *    [31 ---- 3] [2 ---- 1] [0]
62 *      pointer   hash state  1
63 *
64 * For an in-depth description of the mechanics of thin-vs-fat locking,
65 * read the paper referred to above.
66 *
67 * Monitors provide:
68 *  - mutually exclusive access to resources
69 *  - a way for multiple threads to wait for notification
70 *
71 * In effect, they fill the role of both mutexes and condition variables.
72 *
73 * Only one thread can own the monitor at any time.  There may be several
74 * threads waiting on it (the wait call unlocks it).  One or more waiting
75 * threads may be getting interrupted or notified at any given time.
76 *
77 * TODO: the various members of monitor are not SMP-safe.
78 */
79
80
81/*
82 * Monitor accessor.  Extracts a monitor structure pointer from a fat
83 * lock.  Performs no error checking.
84 */
85#define LW_MONITOR(x) \
86  (reinterpret_cast<Monitor*>((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))
87
88/*
89 * Lock recursion count field.  Contains a count of the number of times
90 * a lock has been recursively acquired.
91 */
92#define LW_LOCK_COUNT_MASK 0x1fff
93#define LW_LOCK_COUNT_SHIFT 19
94#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
95
// Hook used to ask the embedder whether the current thread is "sensitive" to
// lock contention; NULL means no thread is treated as sensitive.
bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
// Contention-logging threshold in milliseconds; 0 disables lock profiling.
uint32_t Monitor::lock_profiling_threshold_ = 0;
98
99bool Monitor::IsSensitiveThread() {
100  if (is_sensitive_thread_hook_ != NULL) {
101    return (*is_sensitive_thread_hook_)();
102  }
103  return false;
104}
105
106void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()) {
107  lock_profiling_threshold_ = lock_profiling_threshold;
108  is_sensitive_thread_hook_ = is_sensitive_thread_hook;
109}
110
/*
 * Creates a fat monitor owned by 'owner' for 'obj' and publishes it into the
 * object's lock word. The thin lock's recursion count is carried over and the
 * hash state bits are preserved.
 */
Monitor::Monitor(Thread* owner, Object* obj)
    : monitor_lock_("a monitor lock", kMonitorLock),
      owner_(owner),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      locking_method_(NULL),
      locking_dex_pc_(0) {
  monitor_lock_.Lock(owner);
  // Propagate the lock state: carry the thin lock's recursion count over.
  uint32_t thin = *obj->GetRawLockWordAddress();
  lock_count_ = LW_LOCK_COUNT(thin);
  // Keep only the hash state bits, then install this monitor's address with
  // the fat shape bit set.
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(this) | LW_SHAPE_FAT;
  // Publish the updated lock word; the release store orders it after the
  // monitor's initialization above.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
  // Lock profiling: remember where the owner acquired the lock.
  if (lock_profiling_threshold_ != 0) {
    locking_method_ = owner->GetCurrentMethod(&locking_dex_pc_);
  }
}
132
Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  // Once inflated a lock stays fat (see the file header comment), so the
  // object's lock word must still have the fat shape here.
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
}
137
138/*
139 * Links a thread into a monitor's wait set.  The monitor lock must be
140 * held by the caller of this routine.
141 */
142void Monitor::AppendToWaitSet(Thread* thread) {
143  DCHECK(owner_ == Thread::Current());
144  DCHECK(thread != NULL);
145  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
146  if (wait_set_ == NULL) {
147    wait_set_ = thread;
148    return;
149  }
150
151  // push_back.
152  Thread* t = wait_set_;
153  while (t->wait_next_ != NULL) {
154    t = t->wait_next_;
155  }
156  t->wait_next_ = thread;
157}
158
159/*
160 * Unlinks a thread from a monitor's wait set.  The monitor lock must
161 * be held by the caller of this routine.
162 */
163void Monitor::RemoveFromWaitSet(Thread *thread) {
164  DCHECK(owner_ == Thread::Current());
165  DCHECK(thread != NULL);
166  if (wait_set_ == NULL) {
167    return;
168  }
169  if (wait_set_ == thread) {
170    wait_set_ = thread->wait_next_;
171    thread->wait_next_ = NULL;
172    return;
173  }
174
175  Thread* t = wait_set_;
176  while (t->wait_next_ != NULL) {
177    if (t->wait_next_ == thread) {
178      t->wait_next_ = thread->wait_next_;
179      thread->wait_next_ = NULL;
180      return;
181    }
182    t = t->wait_next_;
183  }
184}
185
// Returns the object whose lock this monitor implements.
Object* Monitor::GetObject() {
  return obj_;
}
189
/*
 * Acquires the monitor for 'self', blocking while another thread owns it.
 * A recursive acquisition by the current owner just bumps lock_count_.
 * When lock profiling is enabled, contended acquisitions are timed and a
 * sampled contention event is logged.
 */
void Monitor::Lock(Thread* self) {
  if (owner_ == self) {
    // Recursive lock by the current owner; no synchronization needed.
    lock_count_++;
    return;
  }

  if (!monitor_lock_.TryLock(self)) {
    // Contended path: we must block until the current owner releases.
    uint64_t waitStart = 0;
    uint64_t waitEnd = 0;
    uint32_t wait_threshold = lock_profiling_threshold_;
    const AbstractMethod* current_locking_method = NULL;
    uint32_t current_locking_dex_pc = 0;
    {
      // Appear as blocked to the rest of the runtime while we wait.
      ScopedThreadStateChange tsc(self, kBlocked);
      if (wait_threshold != 0) {
        waitStart = NanoTime() / 1000;  // Microseconds.
      }
      // Snapshot the owner's acquisition site for contention reporting.
      current_locking_method = locking_method_;
      current_locking_dex_pc = locking_dex_pc_;

      monitor_lock_.Lock(self);
      if (wait_threshold != 0) {
        waitEnd = NanoTime() / 1000;  // Microseconds.
      }
    }

    if (wait_threshold != 0) {
      uint64_t wait_ms = (waitEnd - waitStart) / 1000;
      // Sample with probability proportional to the wait time, saturating at
      // 100% once the threshold is reached.
      uint32_t sample_percent;
      if (wait_ms >= wait_threshold) {
        sample_percent = 100;
      } else {
        sample_percent = 100 * wait_ms / wait_threshold;
      }
      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
        const char* current_locking_filename;
        uint32_t current_locking_line_number;
        TranslateLocation(current_locking_method, current_locking_dex_pc,
                          current_locking_filename, current_locking_line_number);
        LogContentionEvent(self, wait_ms, sample_percent, current_locking_filename, current_locking_line_number);
      }
    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
  if (lock_profiling_threshold_ != 0) {
    locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
  }
}
242
// Throws IllegalMonitorStateException on the current thread with a
// printf-style message. The separate forward declaration carries the format
// attribute so the compiler checks the varargs against the format string.
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
                                              __attribute__((format(printf, 1, 2)));

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  va_list args;
  va_start(args, fmt);
  Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
  if (!Runtime::Current()->IsStarted()) {
    // Before the runtime is started there is no managed code to catch the
    // exception, so also log a thread dump to make the failure visible.
    std::ostringstream ss;
    Thread::Current()->Dump(ss);
    std::string str(ss.str());
    LOG(ERROR) << "IllegalMonitorStateException: " << str;
  }
  va_end(args);
}
259
260static std::string ThreadToString(Thread* thread) {
261  if (thread == NULL) {
262    return "NULL";
263  }
264  std::ostringstream oss;
265  // TODO: alternatively, we could just return the thread's name.
266  oss << *thread;
267  return oss.str();
268}
269
270void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owner,
271                           Monitor* monitor) {
272  Thread* current_owner = NULL;
273  std::string current_owner_string;
274  std::string expected_owner_string;
275  std::string found_owner_string;
276  {
277    // TODO: isn't this too late to prevent threads from disappearing?
278    // Acquire thread list lock so threads won't disappear from under us.
279    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
280    // Re-read owner now that we hold lock.
281    current_owner = (monitor != NULL) ? monitor->owner_ : NULL;
282    // Get short descriptions of the threads involved.
283    current_owner_string = ThreadToString(current_owner);
284    expected_owner_string = ThreadToString(expected_owner);
285    found_owner_string = ThreadToString(found_owner);
286  }
287  if (current_owner == NULL) {
288    if (found_owner == NULL) {
289      ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
290                                         " on thread '%s'",
291                                         PrettyTypeOf(o).c_str(),
292                                         expected_owner_string.c_str());
293    } else {
294      // Race: the original read found an owner but now there is none
295      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
296                                         " (where now the monitor appears unowned) on thread '%s'",
297                                         found_owner_string.c_str(),
298                                         PrettyTypeOf(o).c_str(),
299                                         expected_owner_string.c_str());
300    }
301  } else {
302    if (found_owner == NULL) {
303      // Race: originally there was no owner, there is now
304      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
305                                         " (originally believed to be unowned) on thread '%s'",
306                                         current_owner_string.c_str(),
307                                         PrettyTypeOf(o).c_str(),
308                                         expected_owner_string.c_str());
309    } else {
310      if (found_owner != current_owner) {
311        // Race: originally found and current owner have changed
312        ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
313                                           " owned by '%s') on object of type '%s' on thread '%s'",
314                                           found_owner_string.c_str(),
315                                           current_owner_string.c_str(),
316                                           PrettyTypeOf(o).c_str(),
317                                           expected_owner_string.c_str());
318      } else {
319        ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
320                                           " on thread '%s",
321                                           current_owner_string.c_str(),
322                                           PrettyTypeOf(o).c_str(),
323                                           expected_owner_string.c_str());
324      }
325    }
326  }
327}
328
/*
 * Releases one level of the monitor held by 'self'. Returns false, with an
 * IllegalMonitorStateException pending, if 'self' does not own the monitor.
 * When 'for_wait' is true the caller is Wait(), which has already cleared the
 * owner/count/profiling fields before releasing the underlying mutex.
 */
bool Monitor::Unlock(Thread* self, bool for_wait) {
  DCHECK(self != NULL);
  Thread* owner = owner_;
  if (owner == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      // Outermost unlock: clear ownership and profiling state, then release.
      owner_ = NULL;
      locking_method_ = NULL;
      locking_dex_pc_ = 0;
      monitor_lock_.Unlock(self);
    } else {
      // Recursive unlock: just drop one recursion level.
      --lock_count_;
    }
  } else if (for_wait) {
    // Wait should have already cleared the fields.
    DCHECK_EQ(lock_count_, 0);
    DCHECK(owner == NULL);
    DCHECK(locking_method_ == NULL);
    DCHECK_EQ(locking_dex_pc_, 0u);
    monitor_lock_.Unlock(self);
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    FailedUnlock(obj_, self, owner, this);
    return false;
  }
  return true;
}
358
/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following are true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock; otherwise throw and bail out before
  // touching any monitor state.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
    return;
  }
  monitor_lock_.AssertHeld(self);
  // Ownership verified; the real work happens with the lock held.
  WaitWithLock(self, ms, ns, interruptShouldThrow);
}
393
/*
 * Implements Object.wait() once the monitor is known to be held by 'self'.
 * Releases the monitor (saving the recursion count and profiling state),
 * blocks on the thread's private wait condition, then re-acquires the monitor
 * and restores the saved state. Throws IllegalArgumentException for
 * out-of-range timeouts and, if 'interruptShouldThrow', InterruptedException
 * when the wait was interrupted.
 */
void Monitor::WaitWithLock(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold.  We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor.  Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prev_lock_count = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const AbstractMethod* saved_method = locking_method_;
  locking_method_ = NULL;
  uintptr_t saved_dex_pc = locking_dex_pc_;
  locking_dex_pc_ = 0;

  /*
   * Update thread status.  If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  bool timed = (ms != 0 || ns != 0);
  self->TransitionFromRunnableToSuspended(timed ? kTimedWaiting : kWaiting);

  bool wasInterrupted = false;
  {
    // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
    MutexLock mu(self, *self->wait_mutex_);

    // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
    // non-NULL a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
    // up.
    DCHECK(self->wait_monitor_ == NULL);
    self->wait_monitor_ = this;

    // Release the monitor lock.
    Unlock(self, true);

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted_) {
      wasInterrupted = true;
    } else {
      // Wait for a notification or a timeout to occur.
      if (!timed) {
        self->wait_cond_->Wait(self);
      } else {
        self->wait_cond_->TimedWait(self, ms, ns);
      }
      if (self->interrupted_) {
        wasInterrupted = true;
      }
      // Consume the interrupt; the pending flag is cleared once observed.
      self->interrupted_ = false;
    }
    // No longer waiting; notifiers must not signal us after this point.
    self->wait_monitor_ = NULL;
  }

  // Set self->status back to kRunnable, and self-suspend if needed.
  self->TransitionFromSuspendedToRunnable();

  // Re-acquire the monitor lock.
  Lock(self);


  self->wait_mutex_->AssertNotHeld(self);

  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prev_lock_count;
  locking_method_ = saved_method;
  locking_dex_pc_ = saved_dex_pc;
  RemoveFromWaitSet(self);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    {
      MutexLock mu(self, *self->wait_mutex_);
      self->interrupted_ = false;
    }
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
    }
  }
}
502
503void Monitor::Notify(Thread* self) {
504  DCHECK(self != NULL);
505  // Make sure that we hold the lock.
506  if (owner_ != self) {
507    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
508    return;
509  }
510  monitor_lock_.AssertHeld(self);
511  NotifyWithLock(self);
512}
513
// Wakes the first thread in the wait set that is still actually waiting on
// this monitor (wait_monitor_ != NULL). Threads that have already been woken
// by timeout or interrupt are unlinked and skipped.
void Monitor::NotifyWithLock(Thread* self) {
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting. The per-thread wait mutex
    // makes this check-and-signal atomic with respect to the waiter.
    MutexLock mu(self, *thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal(self);
      return;
    }
  }
}
529
530void Monitor::NotifyAll(Thread* self) {
531  DCHECK(self != NULL);
532  // Make sure that we hold the lock.
533  if (owner_ != self) {
534    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
535    return;
536  }
537  monitor_lock_.AssertHeld(self);
538  NotifyAllWithLock();
539}
540
541void Monitor::NotifyAllWithLock() {
542  // Signal all threads in the wait set.
543  while (wait_set_ != NULL) {
544    Thread* thread = wait_set_;
545    wait_set_ = thread->wait_next_;
546    thread->wait_next_ = NULL;
547    thread->Notify();
548  }
549}
550
/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock. The new
 * monitor is created already owned by 'self' (the Monitor constructor
 * takes the lock and installs itself into the object's lock word).
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  // Inflation is only valid on a thin lock currently owned by 'self'.
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->GetThinLockId()));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(self, obj);
  VLOG(monitor) << "monitor: thread " << self->GetThinLockId()
                << " created monitor " << m << " for object " << obj;
  // Register the monitor so the runtime can sweep it when the object dies.
  Runtime::Current()->GetMonitorList()->Add(m);
}
567
/*
 * Acquires the lock on 'obj' for 'self'. Thin-lock cases, in order of cost:
 * recursive acquire (bump the embedded count, inflating at the recursion
 * limit), uncontended acquire (CAS our thread id into the owner field), and
 * contended acquire (spin/sleep until the lock is released or inflated by
 * another thread, then inflate it ourselves). A fat lock delegates to
 * Monitor::Lock.
 */
void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  timespec tm;
  uint32_t sleepDelayNs;
  uint32_t minSleepDelayNs = 1000000;  /* 1 millisecond */
  uint32_t maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  uint32_t threadId = self->GetThinLockId();
 retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock.  The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock.  Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached.  Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      // The lock is unowned. Install the thread id of the calling thread into the owner field.
      // This is the common case: compiled code will have tried this before calling back into
      // the runtime.
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      // android_atomic_acquire_cas returns 0 on success.
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p (a %s) owned by %d",
                                    threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin));
      // The lock is owned by another thread. Notify the runtime that we are about to wait.
      self->monitor_enter_object_ = obj;
      self->TransitionFromRunnableToSuspended(kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeed. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              // First pass: yield once before starting to sleep.
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap to avoid once a second polls for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the runtime know we are no longer
          // waiting and try again.
          VLOG(monitor) << StringPrintf("monitor: thread %d found lock %p surprise-fattened by another thread", threadId, thinp);
          self->monitor_enter_object_ = NULL;
          self->TransitionFromSuspendedToRunnable();
          goto retry;
        }
      }
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp);
      // We have acquired the thin lock. Let the runtime know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->TransitionFromSuspendedToRunnable();
      // Fatten the lock: contention was observed, so keep a real monitor.
      Inflate(self, obj);
      VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    VLOG(monitor) << StringPrintf("monitor: thread %d locking fat lock %p (%p) %p on a %s",
                                  threadId, thinp, LW_MONITOR(*thinp),
                                  reinterpret_cast<void*>(*thinp), PrettyTypeOf(obj).c_str());
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}
673
/*
 * Releases the lock on 'obj' held by 'self'. For a thin lock owned by 'self'
 * this either clears the owner bits (preserving the hash state) or decrements
 * the recursion count; a fat lock delegates to Monitor::Unlock. Returns false,
 * with IllegalMonitorStateException pending, if 'self' does not own the lock.
 */
bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin.  We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->GetThinLockId()) {
      /*
       * We are the lock owner.  It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case.  Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired.  Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock.  The JVM spec requires that we
       * throw an exception in this case.
       */
      FailedUnlock(obj, self, NULL, NULL);
      return false;
    }
  } else {
    /*
     * The lock is fat.  We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self, false)) {
      // An exception has been raised.  Do not fall through.
      return false;
    }
  }
  return true;
}
732
/*
 * Object.wait().  Also called for class init. Waiting requires a fat lock
 * (the wait set lives on the Monitor), so a thin lock held by 'self' is
 * inflated first.
 */
void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock.  We need to fatten the lock
     * so 'self' can block on it.  Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p by wait()", self->GetThinLockId(), thinp);
  }
  // The lock word is re-read here because Inflate rewrote it above.
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}
758
759void Monitor::Notify(Thread* self, Object *obj) {
760  uint32_t thin = *obj->GetRawLockWordAddress();
761
762  // If the lock is still thin, there aren't any waiters;
763  // waiting on an object forces lock fattening.
764  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
765    // Make sure that 'self' holds the lock.
766    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
767      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
768      return;
769    }
770    // no-op;  there are no waiters to notify.
771    Inflate(self, obj);
772  } else {
773    // It's a fat lock.
774    LW_MONITOR(thin)->Notify(self);
775  }
776}
777
778void Monitor::NotifyAll(Thread* self, Object *obj) {
779  uint32_t thin = *obj->GetRawLockWordAddress();
780
781  // If the lock is still thin, there aren't any waiters;
782  // waiting on an object forces lock fattening.
783  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
784    // Make sure that 'self' holds the lock.
785    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
786      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
787      return;
788    }
789    // no-op;  there are no waiters to notify.
790    Inflate(self, obj);
791  } else {
792    // It's a fat lock.
793    LW_MONITOR(thin)->NotifyAll(self);
794  }
795}
796
797uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) {
798  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
799    return LW_LOCK_OWNER(raw_lock_word);
800  } else {
801    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
802    return owner ? owner->GetThinLockId() : 0;
803  }
804}
805
// If 'thread_lock' is a java.lang.ThreadLock, returns the thin lock id of the
// Thread it belongs to (via the ThreadLock.thread field and the Thread's
// vmData pointer). Returns ThreadList::kInvalidId for null, non-ThreadLock
// objects, or a ThreadLock whose thread is missing or not yet attached.
static uint32_t LockOwnerFromThreadLock(Object* thread_lock) {
  ScopedObjectAccess soa(Thread::Current());
  if (thread_lock == NULL ||
      thread_lock->GetClass() != soa.Decode<Class*>(WellKnownClasses::java_lang_ThreadLock)) {
    return ThreadList::kInvalidId;
  }
  Field* thread_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadLock_thread);
  Object* managed_thread = thread_field->GetObject(thread_lock);
  if (managed_thread == NULL) {
    return ThreadList::kInvalidId;
  }
  // The native Thread* is stashed in the managed Thread's vmData int field.
  Field* vmData_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData);
  uintptr_t vmData = static_cast<uintptr_t>(vmData_field->GetInt(managed_thread));
  Thread* thread = reinterpret_cast<Thread*>(vmData);
  if (thread == NULL) {
    return ThreadList::kInvalidId;
  }
  return thread->GetThinLockId();
}
825
// Writes a one-line description of what 'thread' is waiting on (if anything)
// to 'os', e.g. "- waiting on <addr> (a Type) held by thread N". Emits
// nothing for threads that are neither waiting nor blocked.
void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  ThreadState state;
  state = thread->GetState();

  Object* object = NULL;
  uint32_t lock_owner = ThreadList::kInvalidId;
  if (state == kWaiting || state == kTimedWaiting) {
    os << "  - waiting on ";
    Monitor* monitor;
    {
      // wait_monitor_ is guarded by the thread's wait mutex.
      MutexLock mu(Thread::Current(), *thread->wait_mutex_);
      monitor = thread->wait_monitor_;
    }
    if (monitor != NULL) {
      object = monitor->obj_;
    }
    // Only yields an owner when the object is a java.lang.ThreadLock;
    // otherwise this returns kInvalidId and no owner is printed.
    lock_owner = LockOwnerFromThreadLock(object);
  } else if (state == kBlocked) {
    os << "  - waiting to lock ";
    object = thread->monitor_enter_object_;
    if (object != NULL) {
      lock_owner = object->GetThinLockId();
    }
  } else {
    // We're not waiting on anything.
    return;
  }

  // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
  // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
  os << "<" << object << "> (a " << PrettyTypeOf(object) << ")";

  if (lock_owner != ThreadList::kInvalidId) {
    os << " held by thread " << lock_owner;
  }

  os << "\n";
}
864
865static void DumpLockedObject(std::ostream& os, Object* o)
866    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
867  os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
868}
869
// Dumps every monitor held by the stack frame that stack_visitor is
// currently positioned on. Relies on the method verifier to recover which
// monitor-enter instructions are still "open" at the frame's current dex pc.
void Monitor::DescribeLocks(std::ostream& os, StackVisitor* stack_visitor) {
  AbstractMethod* m = stack_visitor->GetMethod();
  CHECK(m != NULL);

  // Native methods are an easy special case.
  // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
  if (m->IsNative()) {
    if (m->IsSynchronized()) {
      // For synchronized native methods the JNI stub keeps the locked
      // receiver (or class) in SIRT slot 0.
      Object* jni_this = stack_visitor->GetCurrentSirt()->GetReference(0);
      DumpLockedObject(os, jni_this);
    }
    return;
  }

  // Proxy methods should not be synchronized.
  if (m->IsProxyMethod()) {
    CHECK(!m->IsSynchronized());
    return;
  }

  // <clinit> is another special case. The runtime holds the class lock while calling <clinit>.
  MethodHelper mh(m);
  if (mh.IsClassInitializer()) {
    DumpLockedObject(os, m->GetDeclaringClass());
    // Fall through because there might be synchronization in the user code too.
  }

  // Is there any reason to believe there's any synchronization in this method?
  const DexFile::CodeItem* code_item = mh.GetCodeItem();
  CHECK(code_item != NULL) << PrettyMethod(m);
  if (code_item->tries_size_ == 0) {
    return; // No "tries" implies no synchronization, so no held locks to report.
  }

  // Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
  // the locks held in this stack frame.
  std::vector<uint32_t> monitor_enter_dex_pcs;
  verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), monitor_enter_dex_pcs);
  if (monitor_enter_dex_pcs.empty()) {
    return;
  }

  // Verification is an iterative process, so it can visit the same monitor-enter instruction
  // repeatedly with increasingly accurate type information. We don't want duplicates.
  // TODO: is this fixed if we share the other std::vector-returning verifier code?
  STLSortAndRemoveDuplicates(&monitor_enter_dex_pcs);

  for (size_t i = 0; i < monitor_enter_dex_pcs.size(); ++i) {
    // The verifier works in terms of the dex pcs of the monitor-enter instructions.
    // We want the registers used by those instructions (so we can read the values out of them).
    uint32_t dex_pc = monitor_enter_dex_pcs[i];
    uint16_t monitor_enter_instruction = code_item->insns_[dex_pc];

    // Quick sanity check.
    if ((monitor_enter_instruction & 0xff) != Instruction::MONITOR_ENTER) {
      LOG(FATAL) << "expected monitor-enter @" << dex_pc << "; was "
                 << reinterpret_cast<void*>(monitor_enter_instruction);
    }

    // monitor-enter is format 11x: opcode in the low byte, vAA in the high
    // byte of the first code unit.
    uint16_t monitor_register = ((monitor_enter_instruction >> 8) & 0xff);
    Object* o = reinterpret_cast<Object*>(stack_visitor->GetVReg(m, monitor_register,
                                                                 kReferenceVReg));
    DumpLockedObject(os, o);
  }
}
935
936void Monitor::TranslateLocation(const AbstractMethod* method, uint32_t dex_pc,
937                                const char*& source_file, uint32_t& line_number) const {
938  // If method is null, location is unknown
939  if (method == NULL) {
940    source_file = "";
941    line_number = 0;
942    return;
943  }
944  MethodHelper mh(method);
945  source_file = mh.GetDeclaringClassSourceFile();
946  if (source_file == NULL) {
947    source_file = "";
948  }
949  line_number = mh.GetLineNumFromDexPC(dex_pc);
950}
951
// The list starts empty; monitors are registered via Add() as they are inflated.
MonitorList::MonitorList() : monitor_list_lock_("MonitorList lock") {
}
954
// Frees every remaining monitor. The lock is taken for consistency with the
// other members, even though no other thread should touch a dying list.
MonitorList::~MonitorList() {
  MutexLock mu(Thread::Current(), monitor_list_lock_);
  STLDeleteElements(&list_);
}
959
960void MonitorList::Add(Monitor* m) {
961  MutexLock mu(Thread::Current(), monitor_list_lock_);
962  list_.push_front(m);
963}
964
965void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) {
966  MutexLock mu(Thread::Current(), monitor_list_lock_);
967  typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
968  It it = list_.begin();
969  while (it != list_.end()) {
970    Monitor* m = *it;
971    if (!is_marked(m->GetObject(), arg)) {
972      VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
973      delete m;
974      it = list_.erase(it);
975    } else {
976      ++it;
977    }
978  }
979}
980
981}  // namespace art
982