Sync.cpp revision fd54266907c3046339b48748f63afa32ee3ab4e1
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/*
18 * Fundamental synchronization mechanisms.
19 *
20 * The top part of the file has operations on "monitor" structs; the
21 * next part has the native calls on objects.
22 *
23 * The current implementation uses "thin locking" to avoid allocating
24 * an Object's full Monitor struct until absolutely necessary (i.e.,
25 * during contention or a call to wait()).
26 *
27 * TODO: make improvements to thin locking
28 * We may be able to improve performance and reduce memory requirements by:
29 *  - reverting to a thin lock once the Monitor is no longer necessary
30 *  - using a pool of monitor objects, with some sort of recycling scheme
31 *
32 * TODO: recycle native-level monitors when objects are garbage collected.
33 */
34#include "Dalvik.h"
35
36#include <stdlib.h>
37#include <unistd.h>
38#include <pthread.h>
39#include <time.h>
40#include <sys/time.h>
41#include <errno.h>
42
43#define LOG_THIN    LOGV
44
45#ifdef WITH_DEADLOCK_PREDICTION     /* fwd */
46static const char* kStartBanner =
47    "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
48static const char* kEndBanner =
49    "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";
50
51/*
52 * Unsorted, expanding list of objects.
53 *
54 * This is very similar to PointerSet (which came into existence after this),
55 * but these are unsorted, uniqueness is not enforced by the "add" function,
56 * and the base object isn't allocated on the heap.
57 */
58typedef struct ExpandingObjectList {
59    u2          alloc;
60    u2          count;
61    Object**    list;
62} ExpandingObjectList;
63
64/* fwd */
65static void updateDeadlockPrediction(Thread* self, Object* obj);
66static void removeCollectedObject(Object* obj);
67static void expandObjClear(ExpandingObjectList* pList);
68#endif
69
70/*
71 * Every Object has a monitor associated with it, but not every Object is
72 * actually locked.  Even the ones that are locked do not need a
73 * full-fledged monitor until a) there is actual contention or b) wait()
74 * is called on the Object.
75 *
76 * For Dalvik, we have implemented a scheme similar to the one described
77 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
78 * (ACM 1998).  Things are even easier for us, though, because we have
79 * a full 32 bits to work with.
80 *
81 * The two states of an Object's lock are referred to as "thin" and
82 * "fat".  A lock may transition from the "thin" state to the "fat"
83 * state and this transition is referred to as inflation.  Once a lock
84 * has been inflated it remains in the "fat" state indefinitely.
85 *
86 * The lock value itself is stored in Object.lock.  The LSB of the
87 * lock encodes its state.  When cleared, the lock is in the "thin"
88 * state and its bits are formatted as follows:
89 *
90 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
91 *     lock count   thread id  hash state  0
92 *
93 * When set, the lock is in the "fat" state and its bits are formatted
94 * as follows:
95 *
96 *    [31 ---- 3] [2 ---- 1] [0]
97 *      pointer   hash state  1
98 *
99 * For an in-depth description of the mechanics of thin-vs-fat locking,
100 * read the paper referred to above.
101 */
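/*
 * A minimal sketch of how the layout above is typically decoded, using the
 * LW_* helpers (LW_SHAPE, LW_LOCK_OWNER, LW_LOCK_COUNT, LW_MONITOR) that the
 * rest of this file relies on.  Illustrative only; lockOwner() below is the
 * real in-tree equivalent.
 */
#if 0
static void exampleDescribeLockWord(u4 lw)
{
    if (LW_SHAPE(lw) == LW_SHAPE_THIN) {
        /* thin: the owner thread id and recursion count live in the word */
        LOGV("thin lock: owner=%d count=%d\n",
            LW_LOCK_OWNER(lw), LW_LOCK_COUNT(lw));
    } else {
        /* fat: bits [31..3] hold a pointer to this file's Monitor struct */
        LOGV("fat lock: monitor=%p\n", LW_MONITOR(lw));
    }
}
#endif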
102
103/*
104 * Monitors provide:
105 *  - mutually exclusive access to resources
106 *  - a way for multiple threads to wait for notification
107 *
108 * In effect, they fill the role of both mutexes and condition variables.
109 *
110 * Only one thread can own the monitor at any time.  There may be several
111 * threads waiting on it (the wait call unlocks it).  One or more waiting
112 * threads may be getting interrupted or notified at any given time.
113 */
114struct Monitor {
115    Thread*     owner;          /* which thread currently owns the lock? */
116    int         lockCount;      /* owner's recursive lock depth */
117    Object*     obj;            /* what object are we part of [debug only] */
118
119    Thread*     waitSet;        /* threads currently waiting on this monitor */
120
121    pthread_mutex_t lock;
122
123    Monitor*    next;
124
125#ifdef WITH_DEADLOCK_PREDICTION
126    /*
127     * Objects that have been locked immediately after this one in the
128     * past.  We use an expanding flat array, allocated on first use, to
129     * minimize allocations.  Deletions from the list, expected to be
130     * infrequent, are crunched down.
131     */
132    ExpandingObjectList historyChildren;
133
134    /*
135     * We also track parents.  This isn't strictly necessary, but it makes
136     * the cleanup at GC time significantly faster.
137     */
138    ExpandingObjectList historyParents;
139
140    /* used during cycle detection */
141    bool        historyMark;
142
143    /* stack trace, established the first time we locked the object */
144    int         historyStackDepth;
145    int*        historyRawStackTrace;
146#endif
147};
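/*
 * Sketch of how the pieces above fit together, assuming the static helpers
 * defined later in this file (lockMonitor, waitMonitor, unlockMonitor).
 * dvmThreadSleep() near the end of the file is the real in-tree example of
 * this pattern.
 */
#if 0
static void exampleMonitorUsage(Thread* self, Monitor* mon)
{
    lockMonitor(self, mon);               /* acquire (possibly recursively) */
    waitMonitor(self, mon, 500, 0, true); /* release, wait up to 500ms for a
                                             notify/interrupt, then reacquire */
    unlockMonitor(self, mon);             /* release; throws only if unowned */
}
#endif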
148
149
150/*
151 * Create and initialize a monitor.
152 */
153Monitor* dvmCreateMonitor(Object* obj)
154{
155    Monitor* mon;
156
157    mon = (Monitor*) calloc(1, sizeof(Monitor));
158    if (mon == NULL) {
159        LOGE("Unable to allocate monitor\n");
160        dvmAbort();
161    }
162    if (((u4)mon & 7) != 0) {
163        LOGE("Misaligned monitor: %p\n", mon);
164        dvmAbort();
165    }
166    mon->obj = obj;
167    dvmInitMutex(&mon->lock);
168
169    /* replace the head of the list with the new monitor */
170    do {
171        mon->next = gDvm.monitorList;
172    } while (!ATOMIC_CMP_SWAP((int32_t*)(void*)&gDvm.monitorList,
173                              (int32_t)mon->next, (int32_t)mon));
174
175    return mon;
176}
177
178/*
179 * Free the monitor list.  Only used when shutting the VM down.
180 */
181void dvmFreeMonitorList(void)
182{
183    Monitor* mon;
184    Monitor* nextMon;
185
186    mon = gDvm.monitorList;
187    while (mon != NULL) {
188        nextMon = mon->next;
189
190#ifdef WITH_DEADLOCK_PREDICTION
191        expandObjClear(&mon->historyChildren);
192        expandObjClear(&mon->historyParents);
193        free(mon->historyRawStackTrace);
194#endif
195        free(mon);
196        mon = nextMon;
197    }
198}
199
200/*
201 * Log some info about our monitors.
202 */
203void dvmDumpMonitorInfo(const char* msg)
204{
205#if QUIET_ZYGOTE_MONITOR
206    if (gDvm.zygote) {
207        return;
208    }
209#endif
210
211    int totalCount;
212    int liveCount;
213
214    totalCount = liveCount = 0;
215    Monitor* mon = gDvm.monitorList;
216    while (mon != NULL) {
217        totalCount++;
218        if (mon->obj != NULL)
219            liveCount++;
220        mon = mon->next;
221    }
222
223    LOGD("%s: monitor list has %d entries (%d live)\n",
224        msg, totalCount, liveCount);
225}
226
227/*
228 * Get the object that a monitor is part of.
229 */
230Object* dvmGetMonitorObject(Monitor* mon)
231{
232    if (mon == NULL)
233        return NULL;
234    else
235        return mon->obj;
236}
237
238/*
239 * Returns the thread id of the thread owning the given lock.
240 */
241static u4 lockOwner(Object* obj)
242{
243    Thread *owner;
244    u4 lock;
245
246    assert(obj != NULL);
247    /*
248     * Since we're reading the lock value multiple times, latch it so
249     * that it doesn't change out from under us if we get preempted.
250     */
251    lock = obj->lock;
252    if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
253        return LW_LOCK_OWNER(lock);
254    } else {
255        owner = LW_MONITOR(lock)->owner;
256        return owner ? owner->threadId : 0;
257    }
258}
259
260/*
261 * Get the thread that holds the lock on the specified object.  The
262 * object may be unlocked, thin-locked, or fat-locked.
263 *
264 * The caller must lock the thread list before calling here.
265 */
266Thread* dvmGetObjectLockHolder(Object* obj)
267{
268    u4 threadId = lockOwner(obj);
269
270    if (threadId == 0)
271        return NULL;
272    return dvmGetThreadByThreadId(threadId);
273}
274
275/*
276 * Checks whether the given thread holds the given
277 * object's lock.
278 */
279bool dvmHoldsLock(Thread* thread, Object* obj)
280{
281    if (thread == NULL || obj == NULL) {
282        return false;
283    } else {
284        return thread->threadId == lockOwner(obj);
285    }
286}
287
288/*
289 * Free the monitor associated with an object and make the object's lock
290 * thin again.  This is called during garbage collection.
291 */
292static void freeObjectMonitor(Object* obj)
293{
294    Monitor *mon;
295
296    assert(LW_SHAPE(obj->lock) == LW_SHAPE_FAT);
297
298#ifdef WITH_DEADLOCK_PREDICTION
299    if (gDvm.deadlockPredictMode != kDPOff)
300        removeCollectedObject(obj);
301#endif
302
303    mon = LW_MONITOR(obj->lock);
304    obj->lock = DVM_LOCK_INITIAL_THIN_VALUE;
305
306    /* This lock is associated with an object
307     * that's being swept.  The only possible way
308     * anyone could be holding this lock would be
309     * if some JNI code locked but didn't unlock
310     * the object, in which case we've got some bad
311     * native code somewhere.
312     */
313    assert(pthread_mutex_trylock(&mon->lock) == 0);
314    pthread_mutex_destroy(&mon->lock);
315#ifdef WITH_DEADLOCK_PREDICTION
316    expandObjClear(&mon->historyChildren);
317    expandObjClear(&mon->historyParents);
318    free(mon->historyRawStackTrace);
319#endif
320    free(mon);
321}
322
323/*
324 * Frees monitor objects belonging to unmarked objects.
325 */
326void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*))
327{
328    Monitor handle;
329    Monitor *prev, *curr;
330    Object *obj;
331
332    assert(mon != NULL);
333    assert(isUnmarkedObject != NULL);
334    prev = &handle;
335    prev->next = curr = *mon;
336    while (curr != NULL) {
337        obj = curr->obj;
338        if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
339            prev->next = curr = curr->next;
340            freeObjectMonitor(obj);
341        } else {
342            prev = curr;
343            curr = curr->next;
344        }
345    }
346    *mon = handle.next;
347}
348
349/*
350 * Lock a monitor.
351 */
352static void lockMonitor(Thread* self, Monitor* mon)
353{
354    int cc;
355
356    if (mon->owner == self) {
357        mon->lockCount++;
358    } else {
359        ThreadStatus oldStatus;
360
361        if (pthread_mutex_trylock(&mon->lock) != 0) {
362            /* mutex is locked, switch to wait status and sleep on it */
363            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
364            cc = pthread_mutex_lock(&mon->lock);
365            assert(cc == 0);
366            dvmChangeStatus(self, oldStatus);
367        }
368
369        mon->owner = self;
370        assert(mon->lockCount == 0);
371    }
372}
373
374/*
375 * Try to lock a monitor.
376 *
377 * Returns "true" on success.
378 */
379static bool tryLockMonitor(Thread* self, Monitor* mon)
380{
381    int cc;
382
383    if (mon->owner == self) {
384        mon->lockCount++;
385        return true;
386    } else {
387        cc = pthread_mutex_trylock(&mon->lock);
388        if (cc == 0) {
389            mon->owner = self;
390            assert(mon->lockCount == 0);
391            return true;
392        } else {
393            return false;
394        }
395    }
396}
397
398
399/*
400 * Unlock a monitor.
401 *
402 * Returns true if the unlock succeeded.
403 * If the unlock failed, an exception will be pending.
404 */
405static bool unlockMonitor(Thread* self, Monitor* mon)
406{
407    assert(self != NULL);
408    assert(mon != NULL);        // can this happen?
409
410    if (mon->owner == self) {
411        /*
412         * We own the monitor, so nobody else can be in here.
413         */
414        if (mon->lockCount == 0) {
415            int cc;
416            mon->owner = NULL;
417            cc = pthread_mutex_unlock(&mon->lock);
418            assert(cc == 0);
419        } else {
420            mon->lockCount--;
421        }
422    } else {
423        /*
424         * We don't own this, so we're not allowed to unlock it.
425         * The JNI spec says that we should throw IllegalMonitorStateException
426         * in this case.
427         */
428        dvmThrowExceptionFmt("Ljava/lang/IllegalMonitorStateException;",
429                             "unlock of unowned monitor, self=%d owner=%d",
430                             self->threadId,
431                             mon->owner ? mon->owner->threadId : 0);
432        return false;
433    }
434    return true;
435}
436
437/*
438 * Checks the wait set for circular structure.  Returns 0 if the list
439 * is not circular.  Otherwise, returns 1.  Used only by asserts.
440 */
441static int waitSetCheck(Monitor *mon)
442{
443    Thread *fast, *slow;
444    size_t n;
445
446    assert(mon != NULL);
447    fast = slow = mon->waitSet;
448    n = 0;
449    for (;;) {
450        if (fast == NULL) return 0;
451        if (fast->waitNext == NULL) return 0;
452        if (fast == slow && n > 0) return 1;
453        n += 2;
454        fast = fast->waitNext->waitNext;
455        slow = slow->waitNext;
456    }
457}
458
459/*
460 * Links a thread into a monitor's wait set.  The monitor lock must be
461 * held by the caller of this routine.
462 */
463static void waitSetAppend(Monitor *mon, Thread *thread)
464{
465    Thread *elt;
466
467    assert(mon != NULL);
468    assert(mon->owner == dvmThreadSelf());
469    assert(thread != NULL);
470    assert(thread->waitNext == NULL);
471    assert(waitSetCheck(mon) == 0);
472    if (mon->waitSet == NULL) {
473        mon->waitSet = thread;
474        return;
475    }
476    elt = mon->waitSet;
477    while (elt->waitNext != NULL) {
478        elt = elt->waitNext;
479    }
480    elt->waitNext = thread;
481}
482
483/*
484 * Unlinks a thread from a monitor's wait set.  The monitor lock must
485 * be held by the caller of this routine.
486 */
487static void waitSetRemove(Monitor *mon, Thread *thread)
488{
489    Thread *elt;
490
491    assert(mon != NULL);
492    assert(mon->owner == dvmThreadSelf());
493    assert(thread != NULL);
494    assert(waitSetCheck(mon) == 0);
495    if (mon->waitSet == NULL) {
496        return;
497    }
498    if (mon->waitSet == thread) {
499        mon->waitSet = thread->waitNext;
500        thread->waitNext = NULL;
501        return;
502    }
503    elt = mon->waitSet;
504    while (elt->waitNext != NULL) {
505        if (elt->waitNext == thread) {
506            elt->waitNext = thread->waitNext;
507            thread->waitNext = NULL;
508            return;
509        }
510        elt = elt->waitNext;
511    }
512}
513
514/*
515 * Converts the given relative waiting time into an absolute time.
516 */
517void absoluteTime(s8 msec, s4 nsec, struct timespec *ts)
518{
519    s8 endSec;
520
521#ifdef HAVE_TIMEDWAIT_MONOTONIC
522    clock_gettime(CLOCK_MONOTONIC, ts);
523#else
524    {
525        struct timeval tv;
526        gettimeofday(&tv, NULL);
527        ts->tv_sec = tv.tv_sec;
528        ts->tv_nsec = tv.tv_usec * 1000;
529    }
530#endif
531    endSec = ts->tv_sec + msec / 1000;
532    if (endSec >= 0x7fffffff) {
533        LOGV("NOTE: end time exceeds epoch\n");
534        endSec = 0x7ffffffe;
535    }
536    ts->tv_sec = endSec;
537    ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec;
538
539    /* catch rollover */
540    if (ts->tv_nsec >= 1000000000L) {
541        ts->tv_sec++;
542        ts->tv_nsec -= 1000000000L;
543    }
544}
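/*
 * Worked example of the arithmetic above (hypothetical starting time): with
 * msec=1500 and nsec=250000, starting from {tv_sec=100, tv_nsec=900000000},
 * endSec becomes 100 + 1500/1000 = 101 and tv_nsec becomes
 * 900000000 + (1500%1000)*1000000 + 250000 = 1400250000, which the rollover
 * check reduces to {tv_sec=102, tv_nsec=400250000}.
 */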
545
546int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
547                        s8 msec, s4 nsec)
548{
549    int ret;
550    struct timespec ts;
551    absoluteTime(msec, nsec, &ts);
552#if defined(HAVE_TIMEDWAIT_MONOTONIC)
553    ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
554#else
555    ret = pthread_cond_timedwait(cond, mutex, &ts);
556#endif
557    assert(ret == 0 || ret == ETIMEDOUT);
558    return ret;
559}
560
561/*
562 * Wait on a monitor until timeout, interrupt, or notification.  Used for
563 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
564 *
565 * If another thread calls Thread.interrupt(), we throw InterruptedException
566 * and return immediately if one of the following is true:
567 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
568 *  - blocked in join(), join(long), or join(long, int) methods of Thread
569 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
570 * Otherwise, we set the "interrupted" flag.
571 *
572 * Checks to make sure that "nsec" is in the range 0-999999
573 * (i.e. fractions of a millisecond) and throws the appropriate
574 * exception if it isn't.
575 *
576 * The spec allows "spurious wakeups", and recommends that all code using
577 * Object.wait() do so in a loop.  This appears to derive from concerns
578 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
579 * on the web casts doubt on whether these can/should occur.
580 *
581 * Since we're allowed to wake up "early", we clamp extremely long durations
582 * to return at the end of the 32-bit time epoch.
583 */
584static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
585    bool interruptShouldThrow)
586{
587    struct timespec ts;
588    bool wasInterrupted = false;
589    bool timed;
590    int ret;
591
592    assert(self != NULL);
593    assert(mon != NULL);
594
595    /* Make sure that we hold the lock. */
596    if (mon->owner != self) {
597        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
598            "object not locked by thread before wait()");
599        return;
600    }
601
602    /*
603     * Enforce the timeout range.
604     */
605    if (msec < 0 || nsec < 0 || nsec > 999999) {
606        dvmThrowException("Ljava/lang/IllegalArgumentException;",
607            "timeout arguments out of range");
608        return;
609    }
610
611    /*
612     * Compute absolute wakeup time, if necessary.
613     */
614    if (msec == 0 && nsec == 0) {
615        timed = false;
616    } else {
617        absoluteTime(msec, nsec, &ts);
618        timed = true;
619    }
620
621    /*
622     * Add ourselves to the set of threads waiting on this monitor, and
623     * release our hold.  We need to let it go even if we're a few levels
624     * deep in a recursive lock, and we need to restore that later.
625     *
626     * We append to the wait set ahead of clearing the count and owner
627     * fields so the subroutine can check that the calling thread owns
628     * the monitor.  Aside from that, the remaining updates are not
629     * order sensitive because we hold the pthread mutex.
630     */
631    waitSetAppend(mon, self);
632    int prevLockCount = mon->lockCount;
633    mon->lockCount = 0;
634    mon->owner = NULL;
635
636    /*
637     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
638     * that we won't touch any references in this state, and we'll check
639     * our suspend mode before we transition out.
640     */
641    if (timed)
642        dvmChangeStatus(self, THREAD_TIMED_WAIT);
643    else
644        dvmChangeStatus(self, THREAD_WAIT);
645
646    ret = pthread_mutex_lock(&self->waitMutex);
647    assert(ret == 0);
648
649    /*
650     * Set waitMonitor to the monitor object we will be waiting on.
651     * When waitMonitor is non-NULL a notifying or interrupting thread
652     * must signal the thread's waitCond to wake it up.
653     */
654    assert(self->waitMonitor == NULL);
655    self->waitMonitor = mon;
656
657    /*
658     * Handle the case where the thread was interrupted before we called
659     * wait().
660     */
661    if (self->interrupted) {
662        wasInterrupted = true;
663        self->waitMonitor = NULL;
664        pthread_mutex_unlock(&self->waitMutex);
665        goto done;
666    }
667
668    /*
669     * Release the monitor lock and wait for a notification or
670     * a timeout to occur.
671     */
672    pthread_mutex_unlock(&mon->lock);
673
674    if (!timed) {
675        ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
676        assert(ret == 0);
677    } else {
678#ifdef HAVE_TIMEDWAIT_MONOTONIC
679        ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
680#else
681        ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
682#endif
683        assert(ret == 0 || ret == ETIMEDOUT);
684    }
685    if (self->interrupted) {
686        wasInterrupted = true;
687    }
688
689    self->interrupted = false;
690    self->waitMonitor = NULL;
691
692    pthread_mutex_unlock(&self->waitMutex);
693
694    /* Reacquire the monitor lock. */
695    lockMonitor(self, mon);
696
697done:
698    /*
699     * We remove our thread from the wait set after restoring the count
700     * and owner fields so the subroutine can check that the calling
701     * thread owns the monitor.  Aside from that, the remaining updates
702     * are not order sensitive because we hold the pthread mutex.
703     */
704    mon->owner = self;
705    mon->lockCount = prevLockCount;
706    waitSetRemove(mon, self);
707
708    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
709    dvmChangeStatus(self, THREAD_RUNNING);
710
711    if (wasInterrupted) {
712        /*
713         * We were interrupted while waiting, or somebody interrupted an
714         * un-interruptible thread earlier and we're bailing out immediately.
715         *
716         * The doc sayeth: "The interrupted status of the current thread is
717         * cleared when this exception is thrown."
718         */
719        self->interrupted = false;
720        if (interruptShouldThrow)
721            dvmThrowException("Ljava/lang/InterruptedException;", NULL);
722    }
723}
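/*
 * The "wait in a loop" recommendation mentioned above boils down to the
 * canonical pthread pattern sketched here (hypothetical mutex/condvar/flag
 * names, illustrative only): re-check the predicate after every wakeup,
 * because pthread_cond_wait() may return without a matching signal.
 */
#if 0
static void exampleWaitForFlag(pthread_mutex_t* mu, pthread_cond_t* cv,
    volatile bool* flag)
{
    pthread_mutex_lock(mu);
    while (!*flag) {                    /* tolerate spurious wakeups */
        pthread_cond_wait(cv, mu);      /* atomically unlocks mu and sleeps */
    }
    pthread_mutex_unlock(mu);
}
#endif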
724
725/*
726 * Notify one thread waiting on this monitor.
727 */
728static void notifyMonitor(Thread* self, Monitor* mon)
729{
730    Thread* thread;
731
732    assert(self != NULL);
733    assert(mon != NULL);
734
735    /* Make sure that we hold the lock. */
736    if (mon->owner != self) {
737        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
738            "object not locked by thread before notify()");
739        return;
740    }
741    /* Signal the first waiting thread in the wait set. */
742    while (mon->waitSet != NULL) {
743        thread = mon->waitSet;
744        mon->waitSet = thread->waitNext;
745        thread->waitNext = NULL;
746        pthread_mutex_lock(&thread->waitMutex);
747        /* Check to see if the thread is still waiting. */
748        if (thread->waitMonitor != NULL) {
749            pthread_cond_signal(&thread->waitCond);
750            pthread_mutex_unlock(&thread->waitMutex);
751            return;
752        }
753        pthread_mutex_unlock(&thread->waitMutex);
754    }
755}
756
757/*
758 * Notify all threads waiting on this monitor.
759 */
760static void notifyAllMonitor(Thread* self, Monitor* mon)
761{
762    Thread* thread;
763
764    assert(self != NULL);
765    assert(mon != NULL);
766
767    /* Make sure that we hold the lock. */
768    if (mon->owner != self) {
769        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
770            "object not locked by thread before notifyAll()");
771        return;
772    }
773    /* Signal all threads in the wait set. */
774    while (mon->waitSet != NULL) {
775        thread = mon->waitSet;
776        mon->waitSet = thread->waitNext;
777        thread->waitNext = NULL;
778        pthread_mutex_lock(&thread->waitMutex);
779        /* Check to see if the thread is still waiting. */
780        if (thread->waitMonitor != NULL) {
781            pthread_cond_signal(&thread->waitCond);
782        }
783        pthread_mutex_unlock(&thread->waitMutex);
784    }
785}
786
787/*
788 * Changes the shape of a monitor from thin to fat, preserving the
789 * internal lock state.  The calling thread must own the lock.
790 */
791static void inflateMonitor(Thread *self, Object *obj)
792{
793    Monitor *mon;
794    u4 thin;
795
796    assert(self != NULL);
797    assert(obj != NULL);
798    assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
799    assert(LW_LOCK_OWNER(obj->lock) == self->threadId);
800    /* Allocate and acquire a new monitor. */
801    mon = dvmCreateMonitor(obj);
802    lockMonitor(self, mon);
803    /* Propagate the lock state. */
804    thin = obj->lock;
805    mon->lockCount = LW_LOCK_COUNT(thin);
806    thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
807    thin |= (u4)mon | LW_SHAPE_FAT;
808    /* Publish the updated lock word. */
809    MEM_BARRIER();
810    obj->lock = thin;
811}
812
813/*
814 * Implements monitorenter for "synchronized" stuff.
815 *
816 * This does not fail or throw an exception (unless deadlock prediction
817 * is enabled and set to "err" mode).
818 */
819void dvmLockObject(Thread* self, Object *obj)
820{
821    volatile u4 *thinp;
822    ThreadStatus oldStatus;
823    useconds_t sleepDelay;
824    const useconds_t maxSleepDelay = 1 << 20;
825    u4 thin, newThin, threadId;
826
827    assert(self != NULL);
828    assert(obj != NULL);
829    threadId = self->threadId;
830    thinp = &obj->lock;
831retry:
832    thin = *thinp;
833    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
834        /*
835         * The lock is a thin lock.  The owner field is used to
836         * determine the acquire method, ordered by cost.
837         */
838        if (LW_LOCK_OWNER(thin) == threadId) {
839            /*
840             * The calling thread owns the lock.  Increment the
841             * value of the recursion count field.
842             */
843            obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
844        } else if (LW_LOCK_OWNER(thin) == 0) {
845            /*
846             * The lock is unowned.  Install the thread id of the
847             * calling thread into the owner field.  This is the
848             * common case.  In performance critical code the JIT
849             * will have tried this before calling out to the VM.
850             */
851            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
852            if (!ATOMIC_CMP_SWAP((int32_t *)thinp, thin, newThin)) {
853                /*
854                 * The acquire failed.  Try again.
855                 */
856                goto retry;
857            }
858        } else {
859            LOG_THIN("(%d) spin on lock %p: %#x (%#x) %#x",
860                     threadId, &obj->lock, 0, *thinp, thin);
861            /*
862             * The lock is owned by another thread.  Notify the VM
863             * that we are about to wait.
864             */
865            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
866            /*
867             * Spin until the thin lock is released or inflated.
868             */
869            sleepDelay = 0;
870            for (;;) {
871                thin = *thinp;
872                /*
873                 * Check the shape of the lock word.  Another thread
874                 * may have inflated the lock while we were waiting.
875                 */
876                if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
877                    if (LW_LOCK_OWNER(thin) == 0) {
878                        /*
879                         * The lock has been released.  Install the
880                         * thread id of the calling thread into the
881                         * owner field.
882                         */
883                        newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
884                        if (ATOMIC_CMP_SWAP((int32_t *)thinp,
885                                            thin, newThin)) {
886                            /*
887                             * The acquire succeeded.  Break out of the
888                             * loop and proceed to inflate the lock.
889                             */
890                            break;
891                        }
892                    } else {
893                        /*
894                         * The lock has not been released.  Yield so
895                         * the owning thread can run.
896                         */
897                        if (sleepDelay == 0) {
898                            sched_yield();
899                            sleepDelay = 1000;
900                        } else {
901                            usleep(sleepDelay);
902                            if (sleepDelay < maxSleepDelay / 2) {
903                                sleepDelay *= 2;
904                            }
905                        }
906                    }
907                } else {
908                    /*
909                     * The thin lock was inflated by another thread.
910                     * Let the VM know we are no longer waiting and
911                     * try again.
912                     */
913                    LOG_THIN("(%d) lock %p surprise-fattened",
914                             threadId, &obj->lock);
915                    dvmChangeStatus(self, oldStatus);
916                    goto retry;
917                }
918            }
919            LOG_THIN("(%d) spin on lock done %p: %#x (%#x) %#x",
920                     threadId, &obj->lock, 0, *thinp, thin);
921            /*
922             * We have acquired the thin lock.  Let the VM know that
923             * we are no longer waiting.
924             */
925            dvmChangeStatus(self, oldStatus);
926            /*
927             * Fatten the lock.
928             */
929            inflateMonitor(self, obj);
930            LOG_THIN("(%d) lock %p fattened", threadId, &obj->lock);
931        }
932    } else {
933        /*
934         * The lock is a fat lock.
935         */
936        assert(LW_MONITOR(obj->lock) != NULL);
937        lockMonitor(self, LW_MONITOR(obj->lock));
938    }
939#ifdef WITH_DEADLOCK_PREDICTION
940    /*
941     * See if we were allowed to grab the lock at this time.  We do it
942     * *after* acquiring the lock, rather than before, so that we can
943     * freely update the Monitor struct.  This seems counter-intuitive,
944     * but our goal is deadlock *prediction* not deadlock *prevention*.
945     * (If we actually deadlock, the situation is easy to diagnose from
946     * a thread dump, so there's no point making a special effort to do
947     * the checks before the lock is held.)
948     *
949     * This needs to happen before we add the object to the thread's
950     * monitor list, so we can tell the difference between first-lock and
951     * re-lock.
952     *
953     * It's also important that we do this while in THREAD_RUNNING, so
954     * that we don't interfere with cleanup operations in the GC.
955     */
956    if (gDvm.deadlockPredictMode != kDPOff) {
957        if (self->status != THREAD_RUNNING) {
958            LOGE("Bad thread status (%d) in DP\n", self->status);
959            dvmDumpThread(self, false);
960            dvmAbort();
961        }
962        assert(!dvmCheckException(self));
963        updateDeadlockPrediction(self, obj);
964        if (dvmCheckException(self)) {
965            /*
966             * If we're throwing an exception here, we need to free the
967             * lock.  We add the object to the thread's monitor list so the
968             * "unlock" code can remove it.
969             */
970            dvmAddToMonitorList(self, obj, false);
971            dvmUnlockObject(self, obj);
972            LOGV("--- unlocked, pending is '%s'\n",
973                dvmGetException(self)->clazz->descriptor);
974        }
975    }
976
977    /*
978     * Add the locked object, and the current stack trace, to the list
979     * held by the Thread object.  If deadlock prediction isn't on,
980     * don't capture the stack trace.
981     */
982    dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
983#elif defined(WITH_MONITOR_TRACKING)
984    /*
985     * Add the locked object to the list held by the Thread object.
986     */
987    dvmAddToMonitorList(self, obj, false);
988#endif
989}
990
991/*
992 * Implements monitorexit for "synchronized" stuff.
993 *
994 * On failure, throws an exception and returns "false".
995 */
996bool dvmUnlockObject(Thread* self, Object *obj)
997{
998    u4 thin;
999
1000    assert(self != NULL);
1001    assert(self->status == THREAD_RUNNING);
1002    assert(obj != NULL);
1003    /*
1004     * Cache the lock word as its value can change while we are
1005     * examining its state.
1006     */
1007    thin = obj->lock;
1008    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1009        /*
1010         * The lock is thin.  We must ensure that the lock is owned
1011         * by the given thread before unlocking it.
1012         */
1013        if (LW_LOCK_OWNER(thin) == self->threadId) {
1014            /*
1015             * We are the lock owner.  It is safe to update the lock
1016             * without CAS as lock ownership guards the lock itself.
1017             */
1018            if (LW_LOCK_COUNT(thin) == 0) {
1019                /*
1020                 * The lock was not recursively acquired, the common
1021                 * case.  Unlock by clearing all bits except for the
1022                 * hash state.
1023                 */
1024                obj->lock &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
1025            } else {
1026                /*
1027                 * The object was recursively acquired.  Decrement the
1028                 * lock recursion count field.
1029                 */
1030                obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
1031            }
1032        } else {
1033            /*
1034             * We do not own the lock.  The JVM spec requires that we
1035             * throw an exception in this case.
1036             */
1037            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1038                              "unlock of unowned monitor");
1039            return false;
1040        }
1041    } else {
1042        /*
1043         * The lock is fat.  We must check to see if unlockMonitor has
1044         * raised any exceptions before continuing.
1045         */
1046        assert(LW_MONITOR(obj->lock) != NULL);
1047        if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
1048            /*
1049             * An exception has been raised.  Do not fall through.
1050             */
1051            return false;
1052        }
1053    }
1054
1055#ifdef WITH_MONITOR_TRACKING
1056    /*
1057     * Remove the object from the Thread's list.
1058     */
1059    dvmRemoveFromMonitorList(self, obj);
1060#endif
1061
1062    return true;
1063}
1064
1065/*
1066 * Object.wait().  Also called for class init.
1067 */
1068void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
1069    bool interruptShouldThrow)
1070{
1071    Monitor* mon;
1072    u4 thin = obj->lock;
1073
1074    /* If the lock is still thin, we need to fatten it.
1075     */
1076    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1077        /* Make sure that 'self' holds the lock.
1078         */
1079        if (LW_LOCK_OWNER(thin) != self->threadId) {
1080            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1081                "object not locked by thread before wait()");
1082            return;
1083        }
1084
1085        /* This thread holds the lock.  We need to fatten the lock
1086         * so 'self' can block on it.  Don't update the object lock
1087         * field yet, because 'self' needs to acquire the lock before
1088         * any other thread gets a chance.
1089         */
1090        inflateMonitor(self, obj);
1091        LOG_THIN("(%d) lock %p fattened by wait() to count %d",
1092                 self->threadId, &obj->lock, LW_MONITOR(obj->lock)->lockCount);
1093    }
1094    mon = LW_MONITOR(obj->lock);
1095    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
1096}
1097
1098/*
1099 * Object.notify().
1100 */
1101void dvmObjectNotify(Thread* self, Object *obj)
1102{
1103    u4 thin = obj->lock;
1104
1105    /* If the lock is still thin, there aren't any waiters;
1106     * waiting on an object forces lock fattening.
1107     */
1108    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1109        /* Make sure that 'self' holds the lock.
1110         */
1111        if (LW_LOCK_OWNER(thin) != self->threadId) {
1112            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1113                "object not locked by thread before notify()");
1114            return;
1115        }
1116
1117        /* no-op;  there are no waiters to notify.
1118         */
1119    } else {
1120        /* It's a fat lock.
1121         */
1122        notifyMonitor(self, LW_MONITOR(thin));
1123    }
1124}
1125
1126/*
1127 * Object.notifyAll().
1128 */
1129void dvmObjectNotifyAll(Thread* self, Object *obj)
1130{
1131    u4 thin = obj->lock;
1132
1133    /* If the lock is still thin, there aren't any waiters;
1134     * waiting on an object forces lock fattening.
1135     */
1136    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1137        /* Make sure that 'self' holds the lock.
1138         */
1139        if (LW_LOCK_OWNER(thin) != self->threadId) {
1140            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1141                "object not locked by thread before notifyAll()");
1142            return;
1143        }
1144
1145        /* no-op;  there are no waiters to notify.
1146         */
1147    } else {
1148        /* It's a fat lock.
1149         */
1150        notifyAllMonitor(self, LW_MONITOR(thin));
1151    }
1152}
1153
1154/*
1155 * This implements java.lang.Thread.sleep(long msec, int nsec).
1156 *
1157 * The sleep is interruptible by other threads, which means we can't just
1158 * plop into an OS sleep call.  (We probably could if we wanted to send
1159 * signals around and rely on EINTR, but that's inefficient and relies
1160 * on native code respecting our signal mask.)
1161 *
1162 * We have to do all of this stuff for Object.wait() as well, so it's
1163 * easiest to just sleep on a private Monitor.
1164 *
1165 * It appears that we want sleep(0,0) to go through the motions of sleeping
1166 * for a very short duration, rather than just returning.
1167 */
1168void dvmThreadSleep(u8 msec, u4 nsec)
1169{
1170    Thread* self = dvmThreadSelf();
1171    Monitor* mon = gDvm.threadSleepMon;
1172
1173    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
1174    if (msec == 0 && nsec == 0)
1175        nsec++;
1176
1177    lockMonitor(self, mon);
1178    waitMonitor(self, mon, msec, nsec, true);
1179    unlockMonitor(self, mon);
1180}
1181
1182/*
1183 * Implement java.lang.Thread.interrupt().
1184 */
1185void dvmThreadInterrupt(Thread* thread)
1186{
1187    assert(thread != NULL);
1188
1189    pthread_mutex_lock(&thread->waitMutex);
1190
1191    /*
1192     * If the interrupted flag is already set no additional action is
1193     * required.
1194     */
1195    if (thread->interrupted == true) {
1196        pthread_mutex_unlock(&thread->waitMutex);
1197        return;
1198    }
1199
1200    /*
1201     * Raise the "interrupted" flag.  This will cause the thread to bail
1202     * out early from its next wait() attempt, if it isn't currently
1203     * waiting on something.
1204     */
1205    thread->interrupted = true;
1206    MEM_BARRIER();
1207
1208    /*
1209     * Is the thread waiting?
1210     *
1211     * Note that fat vs. thin doesn't matter here;  waitMonitor
1212     * is only set when a thread actually waits on a monitor,
1213     * which implies that the monitor has already been fattened.
1214     */
1215    if (thread->waitMonitor != NULL) {
1216        pthread_cond_signal(&thread->waitCond);
1217    }
1218
1219    pthread_mutex_unlock(&thread->waitMutex);
1220}
1221
1222#ifndef WITH_COPYING_GC
1223u4 dvmIdentityHashCode(Object *obj)
1224{
1225    return (u4)obj;
1226}
1227#else
1228static size_t arrayElementWidth(const ArrayObject *array)
1229{
1230    const char *descriptor;
1231
1232    if (dvmIsObjectArray(array)) {
1233        return sizeof(Object *);
1234    } else {
1235        descriptor = array->obj.clazz->descriptor;
1236        switch (descriptor[1]) {
1237        case 'B': return 1;  /* byte */
1238        case 'C': return 2;  /* char */
1239        case 'D': return 8;  /* double */
1240        case 'F': return 4;  /* float */
1241        case 'I': return 4;  /* int */
1242        case 'J': return 8;  /* long */
1243        case 'S': return 2;  /* short */
1244        case 'Z': return 1;  /* boolean */
1245        }
1246    }
1247    LOGE("object %p has an unhandled descriptor '%s'", array, descriptor);
1248    dvmDumpThread(dvmThreadSelf(), false);
1249    dvmAbort();
1250    return 0;  /* Quiet the compiler. */
1251}
1252
1253static size_t arrayObjectLength(const ArrayObject *array)
1254{
1255    size_t length;
1256
1257    length = offsetof(ArrayObject, contents);
1258    length += array->length * arrayElementWidth(array);
1259    return length;
1260}
1261
1262/*
1263 * Returns the identity hash code of the given object.
1264 */
1265u4 dvmIdentityHashCode(Object *obj)
1266{
1267    Thread *self, *thread;
1268    volatile u4 *lw;
1269    size_t length;
1270    u4 lock, owner, hashState;
1271
1272    if (obj == NULL) {
1273        /*
1274         * Null is defined to have an identity hash code of 0.
1275         */
1276        return 0;
1277    }
1278    lw = &obj->lock;
1279retry:
1280    hashState = LW_HASH_STATE(*lw);
1281    if (hashState == LW_HASH_STATE_HASHED) {
1282        /*
1283         * The object has been hashed but has not had its hash code
1284         * relocated by the garbage collector.  Use the raw object
1285         * address.
1286         */
1287        return (u4)obj >> 3;
1288    } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) {
1289        /*
1290         * The object has been hashed and its hash code has been
1291         * relocated by the collector.  Use the value of the naturally
1292         * aligned word following the instance data.
1293         */
1294        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
1295            length = arrayObjectLength((ArrayObject *)obj);
1296            length = (length + 3) & ~3;
1297        } else {
1298            length = obj->clazz->objectSize;
1299        }
1300        return *(u4 *)(((char *)obj) + length);
1301    } else if (hashState == LW_HASH_STATE_UNHASHED) {
1302        /*
1303         * The object has never been hashed.  Change the hash state to
1304         * hashed and use the raw object address.
1305         */
1306        self = dvmThreadSelf();
1307        if (self->threadId == lockOwner(obj)) {
1308            /*
1309             * We already own the lock so we can update the hash state
1310             * directly.
1311             */
1312            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1313            return (u4)obj >> 3;
1314        }
1315        /*
1316         * We do not own the lock.  Try acquiring the lock.  Should
1317         * this fail, we must suspend the owning thread.
1318         */
1319        if (LW_SHAPE(*lw) == LW_SHAPE_THIN) {
1320            /*
1321             * If the lock is thin assume it is unowned.  We simulate
1322             * an acquire, update, and release with a single CAS.
1323             */
1324            lock = DVM_LOCK_INITIAL_THIN_VALUE;
1325            lock |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1326            if (ATOMIC_CMP_SWAP((int32_t *)lw,
1327                                (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
1328                                (int32_t)lock)) {
1329                /*
1330                 * A new lockword has been installed with a hash state
1331                 * of hashed.  Use the raw object address.
1332                 */
1333                return (u4)obj >> 3;
1334            }
1335        } else {
1336            if (tryLockMonitor(self, LW_MONITOR(*lw))) {
1337                /*
1338                 * The monitor lock has been acquired.  Change the
1339                 * hash state to hashed and use the raw object
1340                 * address.
1341                 */
1342                *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1343                unlockMonitor(self, LW_MONITOR(*lw));
1344                return (u4)obj >> 3;
1345            }
1346        }
1347        /*
1348         * At this point we have failed to acquire the lock.  We must
1349         * identify the owning thread and suspend it.
1350         */
1351        dvmLockThreadList(self);
1352        /*
1353         * Cache the lock word as its value can change between
1354         * determining its shape and retrieving its owner.
1355         */
1356        lock = *lw;
1357        if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
1358            /*
1359             * Find the thread with the corresponding thread id.
1360             */
1361            owner = LW_LOCK_OWNER(lock);
1362            assert(owner != self->threadId);
1363            /*
1364             * If the lock has no owner do not bother scanning the
1365             * thread list and fall through to the failure handler.
1366             */
1367            thread = owner ? gDvm.threadList : NULL;
1368            while (thread != NULL) {
1369                if (thread->threadId == owner) {
1370                    break;
1371                }
1372                thread = thread->next;
1373            }
1374        } else {
1375            thread = LW_MONITOR(lock)->owner;
1376        }
1377        /*
1378         * If thread is NULL the object has been released since the
1379         * thread list lock was acquired.  Try again.
1380         */
1381        if (thread == NULL) {
1382            dvmUnlockThreadList();
1383            goto retry;
1384        }
1385        /*
1386         * Wait for the owning thread to suspend.
1387         */
1388        dvmSuspendThread(thread);
1389        if (dvmHoldsLock(thread, obj)) {
1390            /*
1391             * The owning thread has been suspended.  We can safely
1392             * change the hash state to hashed.
1393             */
1394            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1395            dvmResumeThread(thread);
1396            dvmUnlockThreadList();
1397            return (u4)obj >> 3;
1398        }
1399        /*
1400         * The wrong thread has been suspended.  Try again.
1401         */
1402        dvmResumeThread(thread);
1403        dvmUnlockThreadList();
1404        goto retry;
1405    }
1406    LOGE("object %p has an unknown hash state %#x", obj, hashState);
1407    dvmDumpThread(dvmThreadSelf(), false);
1408    dvmAbort();
1409    return 0;  /* Quiet the compiler. */
1410}
1411#endif  /* WITH_COPYING_GC */
1412
1413#ifdef WITH_DEADLOCK_PREDICTION
1414/*
1415 * ===========================================================================
1416 *      Deadlock prediction
1417 * ===========================================================================
1418 */
1419/*
1420The idea is to predict the possibility of deadlock by recording the order
1421in which monitors are acquired.  If we see an attempt to acquire a lock
1422out of order, we can identify the locks and offending code.
1423
1424To make this work, we need to keep track of the locks held by each thread,
1425and create history trees for each lock.  When a thread tries to acquire
1426a new lock, we walk through the "history children" of the lock, looking
1427for a match with locks the thread already holds.  If we find a match,
1428it means the thread has made a request that could result in a deadlock.
1429
1430To support recursive locks, we always allow re-locking a currently-held
1431lock, and maintain a recursion depth count.
1432
1433An ASCII-art example, where letters represent Objects:
1434
1435        A
1436       /|\
1437      / | \
1438     B  |  D
1439      \ |
1440       \|
1441        C
1442
1443The above is the tree we'd have after handling Object synchronization
1444sequences "ABC", "AC", "AD".  A has three children, {B, C, D}.  C is also
1445a child of B.  (The lines represent pointers between parent and child.
1446Every node can have multiple parents and multiple children.)
1447
1448If we hold AC, and want to lock B, we recursively search through B's
1449children to see if A or C appears.  It does, so we reject the attempt.
1450(A straightforward way to implement it: add a link from C to B, then
1451determine whether the graph starting at B contains a cycle.)
1452
1453If we hold AC and want to lock D, we would succeed, creating a new link
1454from C to D.
1455
1456The lock history and a stack trace is attached to the Object's Monitor
1457struct, which means we need to fatten every Object we lock (thin locking
1458is effectively disabled).  If we don't need the stack trace we can
1459avoid fattening the leaf nodes, only fattening objects that need to hold
1460history trees.
1461
1462Updates to Monitor structs are only allowed for the thread that holds
1463the Monitor, so we actually do most of our deadlock prediction work after
1464the lock has been acquired.
1465
1466When an object with a monitor is GCed, we need to remove it from the
1467history trees.  There are two basic approaches:
1468 (1) Go through the entire set of known monitors, searching all child
1469     lists for the object in question.  This is rather slow, resulting
1470     in GC passes that take upwards of 10 seconds to complete.
1471 (2) Maintain "parent" pointers in each node.  Remove the entries as
1472     required.  This requires additional storage and maintenance for
1473     every operation, but is significantly faster at GC time.
1474For each GCed object, we merge all of the object's children into each of
1475the object's parents.
1476*/
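/*
 * A minimal sketch of the cycle check described above, over a simplified
 * (hypothetical) node type.  traverseTree() below is the real implementation,
 * operating on Monitor history lists.
 */
#if 0
typedef struct ExampleNode {
    struct ExampleNode** children;  /* locks historically acquired after this one */
    int childCount;
    bool mark;                      /* set while the node is on the DFS path */
} ExampleNode;

/* Returns true if a cycle is reachable from "node". */
static bool exampleHasCycle(ExampleNode* node)
{
    bool found = false;
    int i;

    if (node->mark)
        return true;                /* back edge: the node is its own ancestor */
    node->mark = true;
    for (i = 0; i < node->childCount && !found; i++)
        found = exampleHasCycle(node->children[i]);
    node->mark = false;
    return found;
}
#endif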
1477
1478#if !defined(WITH_MONITOR_TRACKING)
1479# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
1480#endif
1481
1482/*
1483 * Clear out the contents of an ExpandingObjectList, freeing any
1484 * dynamic allocations.
1485 */
1486static void expandObjClear(ExpandingObjectList* pList)
1487{
1488    if (pList->list != NULL) {
1489        free(pList->list);
1490        pList->list = NULL;
1491    }
1492    pList->alloc = pList->count = 0;
1493}
1494
1495/*
1496 * Get the number of objects currently stored in the list.
1497 */
1498static inline int expandBufGetCount(const ExpandingObjectList* pList)
1499{
1500    return pList->count;
1501}
1502
1503/*
1504 * Get the Nth entry from the list.
1505 */
1506static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
1507    int i)
1508{
1509    return pList->list[i];
1510}
1511
1512/*
1513 * Add a new entry to the list.
1514 *
1515 * We don't check for or try to enforce uniqueness.  It's expected that
1516 * the higher-level code does this for us.
1517 */
1518static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
1519{
1520    if (pList->count == pList->alloc) {
1521        /* time to expand */
1522        Object** newList;
1523
1524        if (pList->alloc == 0)
1525            pList->alloc = 4;
1526        else
1527            pList->alloc *= 2;
1528        LOGVV("expanding %p to %d\n", pList, pList->alloc);
1529        newList = (Object**) realloc(pList->list, pList->alloc * sizeof(Object*));
1530        if (newList == NULL) {
1531            LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
1532            dvmAbort();
1533        }
1534        pList->list = newList;
1535    }
1536
1537    pList->list[pList->count++] = obj;
1538}
1539
1540/*
1541 * Returns "true" if the element was successfully removed.
1542 */
1543static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
1544{
1545    int i;
1546
1547    for (i = pList->count-1; i >= 0; i--) {
1548        if (pList->list[i] == obj)
1549            break;
1550    }
1551    if (i < 0)
1552        return false;
1553
1554    if (i != pList->count-1) {
1555        /*
1556         * The order of elements is not important, so we just copy the
1557         * last entry into the new slot.
1558         */
1559        //memmove(&pList->list[i], &pList->list[i+1],
1560        //    (pList->count-1 - i) * sizeof(pList->list[0]));
1561        pList->list[i] = pList->list[pList->count-1];
1562    }
1563
1564    pList->count--;
1565    pList->list[pList->count] = (Object*) 0xdecadead;
1566    return true;
1567}
1568
1569/*
1570 * Returns "true" if "obj" appears in the list.
1571 */
1572static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
1573{
1574    int i;
1575
1576    for (i = 0; i < pList->count; i++) {
1577        if (pList->list[i] == obj)
1578            return true;
1579    }
1580    return false;
1581}
1582
1583/*
1584 * Print the list contents to stdout.  For debugging.
1585 */
1586static void expandObjDump(const ExpandingObjectList* pList)
1587{
1588    int i;
1589    for (i = 0; i < pList->count; i++)
1590        printf(" %p", pList->list[i]);
1591}
1592
1593/*
1594 * Check for duplicate entries.  Returns the index of the first instance
1595 * of the duplicated value, or -1 if no duplicates were found.
1596 */
1597static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
1598{
1599    int i, j;
1600    for (i = 0; i < pList->count-1; i++) {
1601        for (j = i + 1; j < pList->count; j++) {
1602            if (pList->list[i] == pList->list[j]) {
1603                return i;
1604            }
1605        }
1606    }
1607
1608    return -1;
1609}
1610
1611
1612/*
1613 * Determine whether "child" appears in the list of objects associated
1614 * with the Monitor in "parent".  If "parent" is a thin lock, we return
1615 * false immediately.
1616 */
1617static bool objectInChildList(const Object* parent, Object* child)
1618{
1619    u4 lock = parent->lock;
1620    if (!IS_LOCK_FAT(&lock)) {
1621        //LOGI("on thin\n");
1622        return false;
1623    }
1624
1625    return expandObjHas(&LW_MONITOR(lock)->historyChildren, child);
1626}
1627
1628/*
1629 * Print the child list.
1630 */
1631static void dumpKids(Object* parent)
1632{
1633    Monitor* mon = LW_MONITOR(parent->lock);
1634
1635    printf("Children of %p:", parent);
1636    expandObjDump(&mon->historyChildren);
1637    printf("\n");
1638}
1639
1640/*
1641 * Add "child" to the list of children in "parent", and add "parent" to
1642 * the list of parents in "child".
1643 */
1644static void linkParentToChild(Object* parent, Object* child)
1645{
1646    //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf());   // !owned for merge
1647    assert(IS_LOCK_FAT(&parent->lock));
1648    assert(IS_LOCK_FAT(&child->lock));
1649    assert(parent != child);
1650    Monitor* mon;
1651
1652    mon = LW_MONITOR(parent->lock);
1653    assert(!expandObjHas(&mon->historyChildren, child));
1654    expandObjAddEntry(&mon->historyChildren, child);
1655
1656    mon = LW_MONITOR(child->lock);
1657    assert(!expandObjHas(&mon->historyParents, parent));
1658    expandObjAddEntry(&mon->historyParents, parent);
1659}
1660
1661
1662/*
1663 * Remove "child" from the list of children in "parent".
1664 */
1665static void unlinkParentFromChild(Object* parent, Object* child)
1666{
1667    //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf());   // !owned for GC
1668    assert(IS_LOCK_FAT(&parent->lock));
1669    assert(IS_LOCK_FAT(&child->lock));
1670    assert(parent != child);
1671    Monitor* mon;
1672
1673    mon = LW_MONITOR(parent->lock);
1674    if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
1675        LOGW("WARNING: child %p not found in parent %p\n", child, parent);
1676    }
1677    assert(!expandObjHas(&mon->historyChildren, child));
1678    assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);
1679
1680    mon = LW_MONITOR(child->lock);
1681    if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
1682        LOGW("WARNING: parent %p not found in child %p\n", parent, child);
1683    }
1684    assert(!expandObjHas(&mon->historyParents, parent));
1685    assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
1686}
1687
1688
1689/*
1690 * Log the monitors held by the current thread.  This is done as part of
1691 * flagging an error.
1692 */
1693static void logHeldMonitors(Thread* self)
1694{
1695    char* name = NULL;
1696
1697    name = dvmGetThreadName(self);
1698    LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
1699        self->threadId, name);
1700    LOGW("(most-recently-acquired on top):\n");
1701    free(name);
1702
1703    LockedObjectData* lod = self->pLockedObjects;
1704    while (lod != NULL) {
1705        LOGW("--- object %p[%d] (%s)\n",
1706            lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
1707        dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);
1708
1709        lod = lod->next;
1710    }
1711}
1712
1713/*
1714 * Recursively traverse the object hierarchy starting at "obj".  We mark
1715 * ourselves on entry and clear the mark on exit.  If we ever encounter
1716 * a marked object, we have a cycle.
1717 *
1718 * Returns "true" if all is well, "false" if we found a cycle.
1719 */
1720static bool traverseTree(Thread* self, const Object* obj)
1721{
1722    assert(IS_LOCK_FAT(&obj->lock));
1723    Monitor* mon = LW_MONITOR(obj->lock);
1724
1725    /*
1726     * Have we been here before?
1727     */
1728    if (mon->historyMark) {
1729        int* rawStackTrace;
1730        int stackDepth;
1731
1732        LOGW("%s\n", kStartBanner);
1733        LOGW("Illegal lock attempt:\n");
1734        LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
1735
1736        rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
1737        dvmLogRawStackTrace(rawStackTrace, stackDepth);
1738        free(rawStackTrace);
1739
1740        LOGW(" ");
1741        logHeldMonitors(self);
1742
1743        LOGW(" ");
1744        LOGW("Earlier, the following lock order (from last to first) was\n");
1745        LOGW("established -- stack trace is from first successful lock:\n");
1746        return false;
1747    }
1748    mon->historyMark = true;
1749
1750    /*
1751     * Examine the children.  We do NOT hold these locks, so they might
1752     * very well transition from thin to fat or change ownership while
1753     * we work.
1754     *
1755     * NOTE: we rely on the fact that they cannot revert from fat to thin
1756     * while we work.  This is currently a safe assumption.
1757     *
1758     * We can safely ignore thin-locked children, because by definition
1759     * they have no history and are leaf nodes.  In the current
1760     * implementation we always fatten the locks to provide a place to
1761     * hang the stack trace.
1762     */
1763    ExpandingObjectList* pList = &mon->historyChildren;
1764    int i;
1765    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
1766        const Object* child = expandBufGetEntry(pList, i);
1767        u4 lock = child->lock;
1768        if (!IS_LOCK_FAT(&lock))
1769            continue;
1770        if (!traverseTree(self, child)) {
1771            LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
1772            dvmLogRawStackTrace(mon->historyRawStackTrace,
1773                mon->historyStackDepth);
1774            mon->historyMark = false;
1775            return false;
1776        }
1777    }
1778
1779    mon->historyMark = false;
1780
1781    return true;
1782}
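/*
 * Illustrative example of the check above: suppose some thread previously
 * acquired A and then B, so the history already contains the edge A -> B.
 * If a thread that currently holds B now acquires A,
 * updateDeadlockPrediction() tentatively adds B -> A and calls
 * traverseTree(self, A).  The walk marks A, descends to its child B, then
 * to B's child A, finds A already marked, and reports the cycle
 * A -> B -> A as a potential deadlock.
 */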
1783
1784/*
1785 * Update the deadlock prediction tree, based on the current thread
1786 * acquiring "acqObj".  This must be called before the object is added to
1787 * the thread's list of held monitors.
1788 *
1789 * If the thread already holds the lock (recursion), or this is a known
1790 * lock configuration, we return without doing anything.  Otherwise, we add
1791 * a link from the most-recently-acquired lock in this thread to "acqObj"
1792 * after ensuring that the parent lock is "fat".
1793 *
1794 * This MUST NOT be called while a GC is in progress in another thread,
1795 * because we assume exclusive access to history trees in owned monitors.
1796 */
1797static void updateDeadlockPrediction(Thread* self, Object* acqObj)
1798{
1799    LockedObjectData* lod;
1800    LockedObjectData* mrl;
1801
1802    /*
1803     * Quick check for recursive access.
1804     */
1805    lod = dvmFindInMonitorList(self, acqObj);
1806    if (lod != NULL) {
1807        LOGV("+++ DP: recursive %p\n", acqObj);
1808        return;
1809    }
1810
1811    /*
1812     * Make the newly-acquired object's monitor "fat".  In some ways this
1813     * isn't strictly necessary, but we need the GC to tell us when
1814     * "interesting" objects go away, and right now the only way to make
1815     * an object look interesting is to give it a monitor.
1816     *
1817     * This also gives us a place to hang a stack trace.
1818     *
1819     * Our thread holds the lock, so we're allowed to rewrite the lock
1820     * without worrying that something will change out from under us.
1821     */
1822    if (!IS_LOCK_FAT(&acqObj->lock)) {
1823        LOGVV("fattening lockee %p (recur=%d)\n",
1824            acqObj, LW_LOCK_COUNT(acqObj->lock));
1825        inflateMonitor(self, acqObj);
1826    }
1827
1828    /* if we don't have a stack trace for this monitor, establish one */
1829    if (LW_MONITOR(acqObj->lock)->historyRawStackTrace == NULL) {
1830        Monitor* mon = LW_MONITOR(acqObj->lock);
1831        mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
1832            &mon->historyStackDepth);
1833    }
1834
1835    /*
1836     * We need to examine and perhaps modify the most-recently-locked
1837     * monitor.  We own that, so there's no risk of another thread
1838     * stepping on us.
1839     *
1840     * Retrieve the most-recently-locked entry from our thread.
1841     */
1842    mrl = self->pLockedObjects;
1843    if (mrl == NULL)
1844        return;         /* no other locks held */
1845
1846    /*
1847     * Do a quick check to see if "acqObj" is a direct descendant.  We can do
1848     * this without holding the global lock because of our assertion that
1849     * a GC is not running in parallel -- nobody except the GC can
1850     * modify a history list in a Monitor they don't own, and we own "mrl".
1851     * (There might be concurrent *reads*, but no concurrent *writes*.)
1852     *
1853     * If we find it, this is a known good configuration, and we're done.
1854     */
1855    if (objectInChildList(mrl->obj, acqObj))
1856        return;
1857
1858    /*
1859     * "mrl" is going to need to have a history tree.  If it's currently
1860     * a thin lock, we make it fat now.  The thin lock might have a
1861     * nonzero recursive lock count, which we need to carry over.
1862     *
1863     * Our thread holds the lock, so we're allowed to rewrite the lock
1864     * without worrying that something will change out from under us.
1865     */
1866    if (!IS_LOCK_FAT(&mrl->obj->lock)) {
1867        LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
1868            mrl->obj, acqObj, LW_LOCK_COUNT(mrl->obj->lock));
1869        inflateMonitor(self, mrl->obj);
1870    }
1871
1872    /*
1873     * We haven't seen this configuration before.  We need to scan down
1874     * acqObj's tree to see if any of the monitors in self->pLockedObjects
1875     * appear.  We grab a global lock before traversing or updating the
1876     * history list.
1877     *
1878     * If we find a match for any of our held locks, we know that the lock
1879     * has previously been acquired *after* acqObj, and we throw an error.
1880     *
1881     * The easiest way to do this is to create a link from "mrl" to "acqObj"
1882     * and do a recursive traversal, marking nodes as we cross them.  If
1883     * we cross one a second time, we have a cycle and can throw an error.
1884     * (We do the flag-clearing traversal before adding the new link, so
1885     * that we're guaranteed to terminate.)
1886     *
1887     * If "acqObj" is a thin lock, it has no history, and we can create a
1888     * link to it without additional checks.  [ We now guarantee that it's
1889     * always fat. ]
1890     */
1891    bool failed = false;
1892    dvmLockMutex(&gDvm.deadlockHistoryLock);
1893    linkParentToChild(mrl->obj, acqObj);
1894    if (!traverseTree(self, acqObj)) {
1895        LOGW("%s\n", kEndBanner);
1896        failed = true;
1897
1898        /* remove the entry so we're still okay when in "warning" mode */
1899        unlinkParentFromChild(mrl->obj, acqObj);
1900    }
1901    dvmUnlockMutex(&gDvm.deadlockHistoryLock);
1902
1903    if (failed) {
1904        switch (gDvm.deadlockPredictMode) {
1905        case kDPErr:
1906            dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
1907            break;
1908        case kDPAbort:
1909            LOGE("Aborting due to potential deadlock\n");
1910            dvmAbort();
1911            break;
1912        default:
1913            /* warn only */
1914            break;
1915        }
1916    }
1917}
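/*
 * Rough sketch of how the acquire path is expected to drive the predictor
 * (the real call site lives earlier in this file; kDPOff and
 * dvmAddToMonitorList are assumptions here, inferred from the names used
 * above rather than copied from that code):
 *
 *   if (gDvm.deadlockPredictMode != kDPOff)
 *       updateDeadlockPrediction(self, obj);   // before recording the lock
 *   dvmAddToMonitorList(self, obj, true);      // then record it as held
 */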
1918
1919/*
1920 * We're removing "child" from existence.  We want to pull all of
1921 * child's children into "parent", filtering out duplicates.  This is
1922 * called during the GC.
1923 *
1924 * This does not modify "child", which might have multiple parents.
1925 */
1926static void mergeChildren(Object* parent, const Object* child)
1927{
1928    Monitor* mon;
1929    int i;
1930
1931    assert(IS_LOCK_FAT(&child->lock));
1932    mon = LW_MONITOR(child->lock);
1933    ExpandingObjectList* pList = &mon->historyChildren;
1934
1935    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
1936        Object* grandChild = expandBufGetEntry(pList, i);
1937
1938        if (!objectInChildList(parent, grandChild)) {
1939            LOGVV("+++  migrating %p link to %p\n", grandChild, parent);
1940            linkParentToChild(parent, grandChild);
1941        } else {
1942            LOGVV("+++  parent %p already links to %p\n", parent, grandChild);
1943        }
1944    }
1945}
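/*
 * Worked example: with edges A -> B and B -> {C, D}, collecting B leads to
 * mergeChildren(A, B), which adds A -> C and A -> D (skipping any edge A
 * already has).  Dropping the A -> B edge itself is the caller's job, and
 * B's own lists are never touched here.
 */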
1946
1947/*
1948 * An object with a fat lock is being collected during a GC pass.  We
1949 * want to remove it from any lock history trees that it is a part of.
1950 *
1951 * This may require updating the history trees in several monitors.  The
1952 * monitor semantics guarantee that no other thread will be accessing
1953 * the history trees at the same time.
1954 */
1955static void removeCollectedObject(Object* obj)
1956{
1957    Monitor* mon;
1958
1959    LOGVV("+++ collecting %p\n", obj);
1960
1961#if 0
1962    /*
1963     * We're currently running through the entire set of known monitors.
1964     * This can be somewhat slow.  We may want to keep lists of parents
1965     * in each child to speed up GC.
1966     */
1967    mon = gDvm.monitorList;
1968    while (mon != NULL) {
1969        Object* parent = mon->obj;
1970        if (parent != NULL) {       /* value nulled for deleted entries */
1971            if (objectInChildList(parent, obj)) {
1972                LOGVV("removing child %p from parent %p\n", obj, parent);
1973                unlinkParentFromChild(parent, obj);
1974                mergeChildren(parent, obj);
1975            }
1976        }
1977        mon = mon->next;
1978    }
1979#endif
1980
1981    /*
1982     * For every parent of this object:
1983     *  - merge all of our children into the parent's child list (creates
1984     *    a two-way link between the parent and each of our children)
1985     *  - remove ourselves from the parent's child list
1986     */
1987    ExpandingObjectList* pList;
1988    int i;
1989
1990    assert(IS_LOCK_FAT(&obj->lock));
1991    mon = LW_MONITOR(obj->lock);
1992    pList = &mon->historyParents;
1993    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
1994        Object* parent = expandBufGetEntry(pList, i);
1995        Monitor* parentMon = LW_MONITOR(parent->lock);
1996
1997        if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
1998            LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
1999        }
2000        assert(!expandObjHas(&parentMon->historyChildren, obj));
2001
2002        mergeChildren(parent, obj);
2003    }
2004
2005    /*
2006     * For every child of this object:
2007     *  - remove ourselves from the child's parent list
2008     */
2009    pList = &mon->historyChildren;
2010    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
2011        Object* child = expandBufGetEntry(pList, i);
2012        Monitor* childMon = LW_MONITOR(child->lock);
2013
2014        if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
2015            LOGW("WARNING: parent %p not found in child %p\n", obj, child);
2016        }
2017        assert(!expandObjHas(&childMon->historyParents, obj));
2018    }
2019}
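/*
 * Net effect of removeCollectedObject(): the collected node is spliced out
 * of the history graph while the ordering it witnessed is preserved.  For
 * example, with P -> obj and obj -> C, the result is P -> C, and obj is no
 * longer referenced from P's child list or from C's parent list.
 */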
2020
2021#endif /*WITH_DEADLOCK_PREDICTION*/
2022