Sync.cpp revision 71938023ea2bacc1923ec44c71a797f254d268e2
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Fundamental synchronization mechanisms.
 *
 * The top part of the file has operations on "monitor" structs; the
 * next part has the native calls on objects.
 *
 * The current implementation uses "thin locking" to avoid allocating
 * an Object's full Monitor struct until absolutely necessary (i.e.,
 * during contention or a call to wait()).
 *
 * TODO: make improvements to thin locking
 * We may be able to improve performance and reduce memory requirements by:
 *  - reverting to a thin lock once the Monitor is no longer necessary
 *  - using a pool of monitor objects, with some sort of recycling scheme
 *
 * TODO: recycle native-level monitors when objects are garbage collected.
 *
 * NOTE: if we broadcast a notify, and somebody sneaks in a Thread.interrupt
 * before the notify finishes (i.e. before all threads sleeping on the
 * condition variable have awoken), we could end up with a nonzero value for
 * "notifying" after everybody is gone, because one of the notified threads
 * will actually exit via the "interrupted" path.  This can be detected as
 * (notifying + interrupting > waiting), i.e. the number of threads that need
 * to be woken is greater than the number waiting.  The fix is to test and
 * adjust "notifying" at the start of the wait() call.
 *  -> This is probably not a problem if we notify fewer than the full set
 *     before the interrupt comes in.  If we have four waiters, two pending
 *     notifies, and an interrupt hits, we will interrupt one thread and
 *     notify two others.  It doesn't matter if the interrupted thread would
 *     have been one of the notified.  The count is only screwed up if we
 *     have two waiters, in which case it's safe to fix it at the start of
 *     the next wait().
 */
#include "Dalvik.h"

#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <time.h>
#include <sys/time.h>
#include <errno.h>

#define LOG_THIN    LOGV

#ifdef WITH_DEADLOCK_PREDICTION    /* fwd */
static const char* kStartBanner =
    "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
static const char* kEndBanner =
    "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";

/*
 * Unsorted, expanding list of objects.
 *
 * This is very similar to PointerSet (which came into existence after this),
 * but these are unsorted, uniqueness is not enforced by the "add" function,
 * and the base object isn't allocated on the heap.
 */
typedef struct ExpandingObjectList {
    u2          alloc;
    u2          count;
    Object**    list;
} ExpandingObjectList;

/* fwd */
static void updateDeadlockPrediction(Thread* self, Object* obj);
static void removeCollectedObject(Object* obj);
static void expandObjClear(ExpandingObjectList* pList);
#endif

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked.  Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Dalvik, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998).  Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat".  A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation.  Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock, which is a union of
 * the form:
 *
 *     typedef union Lock {
 *         u4          thin;
 *         Monitor*    mon;
 *     } Lock;
 *
 * The LSB of the lock value encodes its state.  If cleared, the lock
 * is in the "thin" state and its bits are formatted as follows:
 *
 *     [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *      lock count   thread id  hash state  0
 *
 * If set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *     [31 ---- 3] [2 ---- 1] [0]
 *       pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 */
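
/*
 * Illustrative example (added commentary, not part of the original
 * sources): a few worked lock-word values, assuming the layout above with
 * a thread-id shift of 3 (LW_LOCK_OWNER_SHIFT) and a lock-count shift of
 * 19 (LW_LOCK_COUNT_SHIFT), ignoring the hash state bits:
 *
 *     0x00000000    unlocked thin lock (the "initial" value)
 *     0x00000028    thin, held once by thread id 5      (5 << 3)
 *     0x00080028    thin, held twice by thread id 5     (1 << 19 | 5 << 3)
 *     mon | 1       fat; bits [31..3] hold the 8-byte-aligned Monitor
 *                   pointer, and the set LSB marks the "fat" shape
 */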

/*
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several
 * threads waiting on it (the wait call unlocks it).  One or more waiting
 * threads may be getting interrupted or notified at any given time.
 */
struct Monitor {
    Thread*     owner;          /* which thread currently owns the lock? */
    int         lockCount;      /* owner's recursive lock depth */
    Object*     obj;            /* what object are we part of [debug only] */

    int         waiting;        /* total #of threads waiting on this */
    int         notifying;      /* #of threads being notified */
    int         interrupting;   /* #of threads being interrupted */

    pthread_mutex_t lock;
    pthread_cond_t  cond;

    Monitor*    next;

#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * Objects that have been locked immediately after this one in the
     * past.  We use an expanding flat array, allocated on first use, to
     * minimize allocations.  Deletions from the list, expected to be
     * infrequent, are crunched down.
     */
    ExpandingObjectList historyChildren;

    /*
     * We also track parents.  This isn't strictly necessary, but it makes
     * the cleanup at GC time significantly faster.
     */
    ExpandingObjectList historyParents;

    /* used during cycle detection */
    bool        historyMark;

    /* stack trace, established the first time we locked the object */
    int         historyStackDepth;
    int*        historyRawStackTrace;
#endif
};


/*
 * Create and initialize a monitor.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        LOGE("Unable to allocate monitor\n");
        dvmAbort();
    }
    if (((u4)mon & 7) != 0) {
        LOGE("Misaligned monitor: %p\n", mon);
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);
    pthread_cond_init(&mon->cond, NULL);

    /* replace the head of the list with the new monitor */
    do {
        mon->next = gDvm.monitorList;
    } while (!ATOMIC_CMP_SWAP((int32_t*)(void*)&gDvm.monitorList,
                              (int32_t)mon->next, (int32_t)mon));

    return mon;
}
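
/*
 * Note (added commentary): the loop above is a lock-free stack push.  If
 * another thread pushes a monitor between our read of gDvm.monitorList and
 * the compare-and-swap, the CAS fails and we retry against the new head.
 * ABA problems don't arise in practice because monitors are only ever
 * pushed onto this list while the VM runs; per the TODO in
 * dvmFreeObjectMonitor_internal, collected monitors are not yet unlinked.
 */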

/*
 * Release a Monitor.
 */
static void releaseMonitor(Monitor* mon)
{
    // TODO
}

/*
 * Free the monitor list.  Only used when shutting the VM down.
 */
void dvmFreeMonitorList(void)
{
    Monitor* mon;
    Monitor* nextMon;

    mon = gDvm.monitorList;
    while (mon != NULL) {
        nextMon = mon->next;

#ifdef WITH_DEADLOCK_PREDICTION
        expandObjClear(&mon->historyChildren);
        expandObjClear(&mon->historyParents);
        free(mon->historyRawStackTrace);
#endif
        free(mon);
        mon = nextMon;
    }
}

/*
 * Log some info about our monitors.
 */
void dvmDumpMonitorInfo(const char* msg)
{
#if QUIET_ZYGOTE_MONITOR
    if (gDvm.zygote) {
        return;
    }
#endif

    int totalCount;
    int liveCount;

    totalCount = liveCount = 0;
    Monitor* mon = gDvm.monitorList;
    while (mon != NULL) {
        totalCount++;
        if (mon->obj != NULL)
            liveCount++;
        mon = mon->next;
    }

    LOGD("%s: monitor list has %d entries (%d live)\n",
        msg, totalCount, liveCount);
}

/*
 * Get the object that a monitor is part of.
 */
Object* dvmGetMonitorObject(Monitor* mon)
{
    if (mon == NULL)
        return NULL;
    else
        return mon->obj;
}

/*
 * Checks whether the given thread holds the given
 * object's lock.
 */
bool dvmHoldsLock(Thread* thread, Object* obj)
{
    u4 thin;

    if (thread == NULL || obj == NULL) {
        return false;
    }

    /* Since we're reading the lock value multiple times,
     * latch it so that it doesn't change out from under
     * us if we get preempted.
     */
    thin = obj->lock.thin;
    if (LW_SHAPE(thin) == LW_SHAPE_FAT) {
        return thread == LW_MONITOR(thin)->owner;
    } else {
        return thread->threadId == LW_LOCK_OWNER(thin);
    }
}

/*
 * Free the monitor associated with an object and make the object's lock
 * thin again.  This is called during garbage collection.
 */
void dvmFreeObjectMonitor_internal(Lock *lock)
{
    Monitor *mon;

    /* The macro that wraps this function checks IS_LOCK_FAT() first.
     */
    assert(IS_LOCK_FAT(lock));

#ifdef WITH_DEADLOCK_PREDICTION
    if (gDvm.deadlockPredictMode != kDPOff)
        removeCollectedObject(LW_MONITOR(lock->thin)->obj);
#endif

    mon = LW_MONITOR(lock->thin);
    lock->thin = DVM_LOCK_INITIAL_THIN_VALUE;

    /* This lock is associated with an object
     * that's being swept.  The only possible way
     * anyone could be holding this lock would be
     * if some JNI code locked but didn't unlock
     * the object, in which case we've got some bad
     * native code somewhere.
     */
    assert(pthread_mutex_trylock(&mon->lock) == 0);
    pthread_mutex_destroy(&mon->lock);
    pthread_cond_destroy(&mon->cond);
#if 1
//TODO: unlink from the monitor list (would require a lock)
// (might not -- the GC suspension may be enough)
    {
        Monitor *next;
        next = mon->next;
#ifdef WITH_DEADLOCK_PREDICTION
        expandObjClear(&mon->historyChildren);
        expandObjClear(&mon->historyParents);
        free(mon->historyRawStackTrace);
#endif
        memset(mon, 0, sizeof (*mon));
        mon->next = next;
    }
//free(mon);
#endif
}


/*
 * Lock a monitor.
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    int cc;

    if (mon->owner == self) {
        mon->lockCount++;
    } else {
        ThreadStatus oldStatus;

        if (pthread_mutex_trylock(&mon->lock) != 0) {
            /* mutex is locked, switch to wait status and sleep on it */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            cc = pthread_mutex_lock(&mon->lock);
            assert(cc == 0);
            dvmChangeStatus(self, oldStatus);
        }

        mon->owner = self;
        assert(mon->lockCount == 0);

        /*
         * "waiting", "notifying", and "interrupting" could all be nonzero
         * if we're locking an object on which other threads are waiting.
         * Nothing worth assert()ing about here.
         */
    }
}

/*
 * Try to lock a monitor.
 *
 * Returns "true" on success.
 */
static bool tryLockMonitor(Thread* self, Monitor* mon)
{
    int cc;

    if (mon->owner == self) {
        mon->lockCount++;
        return true;
    } else {
        cc = pthread_mutex_trylock(&mon->lock);
        if (cc == 0) {
            mon->owner = self;
            assert(mon->lockCount == 0);
            return true;
        } else {
            return false;
        }
    }
}


/*
 * Unlock a monitor.
 *
 * Returns true if the unlock succeeded.
 * If the unlock failed, an exception will be pending.
 */
static bool unlockMonitor(Thread* self, Monitor* mon)
{
    assert(mon != NULL);        // can this happen?

    if (mon->owner == self) {
        /*
         * We own the monitor, so nobody else can be in here.
         */
        if (mon->lockCount == 0) {
            int cc;
            mon->owner = NULL;
            cc = pthread_mutex_unlock(&mon->lock);
            assert(cc == 0);
        } else {
            mon->lockCount--;
        }
    } else {
        /*
         * We don't own this, so we're not allowed to unlock it.
         * The JNI spec says that we should throw IllegalMonitorStateException
         * in this case.
         */
        if (mon->owner == NULL) {
            //LOGW("Unlock fat %p: not owned\n", mon->obj);
        } else {
            //LOGW("Unlock fat %p: id %d vs %d\n",
            //    mon->obj, mon->owner->threadId, self->threadId);
        }
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "unlock of unowned monitor");
        return false;
    }
    return true;
}
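
/*
 * Illustrative example (added commentary, not part of the original
 * sources): "lockCount" counts re-entries, not total acquisitions, so
 * zero means "held once".  A recursive sequence on one thread evolves
 * like this:
 *
 *     lockMonitor(self, mon);     // acquires the mutex, lockCount == 0
 *     lockMonitor(self, mon);     // re-entry, lockCount == 1
 *     unlockMonitor(self, mon);   // lockCount back to 0, still held
 *     unlockMonitor(self, mon);   // releases the pthread mutex
 */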

/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int cc;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowException("Ljava/lang/IllegalArgumentException;",
            "timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        s8 endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        endSec = now.tv_sec + msec / 1000;
        if (endSec >= 0x7fffffff) {
            LOGV("NOTE: end time exceeds epoch\n");
            endSec = 0x7ffffffe;
        }
        ts.tv_sec = endSec;
        ts.tv_nsec = (now.tv_nsec + (msec % 1000) * 1000 * 1000) + nsec;
#else
        struct timeval now;
        gettimeofday(&now, NULL);
        endSec = now.tv_sec + msec / 1000;
        if (endSec >= 0x7fffffff) {
            LOGV("NOTE: end time exceeds epoch\n");
            endSec = 0x7ffffffe;
        }
        ts.tv_sec = endSec;
        ts.tv_nsec = (now.tv_usec + (msec % 1000) * 1000) * 1000 + nsec;
#endif

        /* catch rollover */
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }
        timed = true;
    }
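
    /*
     * Worked example (added commentary, gettimeofday branch): for
     * wait(1500, 250000) with now = {tv_sec = 1000, tv_usec = 300000}:
     *     endSec     = 1000 + 1500/1000                    = 1001
     *     ts.tv_nsec = (300000 + 500*1000) * 1000 + 250000 = 800250000
     * No rollover here; had tv_nsec reached 1e9, the check above would
     * have carried one second into ts.tv_sec.
     */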

    /*
     * Make sure "notifying" wasn't screwed up by earlier activity.  If this
     * is wrong we could end up waking up too many people.  (This is a rare
     * situation, but we need to handle it correctly.)
     */
    if (mon->notifying + mon->interrupting > mon->waiting) {
        LOGD("threadid=%d: bogus mon %d+%d>%d; adjusting\n",
            self->threadId, mon->notifying, mon->interrupting,
            mon->waiting);

        assert(mon->waiting >= mon->interrupting);
        mon->notifying = mon->waiting - mon->interrupting;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold.  We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * The order of operations here isn't significant, because we still
     * hold the pthread mutex.
     */
    int prevLockCount;

    prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->waiting++;
    mon->owner = NULL;

    /*
     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    /*
     * Tell the thread which monitor we're waiting on.  This is necessary
     * so that Thread.interrupt() can wake us up.  Thread.interrupt needs
     * to gain ownership of the monitor mutex before it can signal us, so
     * we're still not worried about race conditions.
     */
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted) {
        wasInterrupted = true;
        goto done;
    }

    LOGVV("threadid=%d: waiting on %p\n", self->threadId, mon);

    while (true) {
        if (!timed) {
            cc = pthread_cond_wait(&mon->cond, &mon->lock);
            assert(cc == 0);
        } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&mon->cond, &mon->lock, &ts);
#else
            cc = pthread_cond_timedwait(&mon->cond, &mon->lock, &ts);
#endif
            if (cc == ETIMEDOUT) {
                LOGVV("threadid=%d wakeup: timeout\n", self->threadId);
                break;
            }
        }

        /*
         * We woke up because of an interrupt (which does a broadcast) or
         * a notification (which might be a signal or a broadcast).  Figure
         * out what we need to do.
         */
        if (self->interruptingWait) {
            /*
             * The other thread successfully gained the monitor lock, and
             * has confirmed that we were waiting on it.  If this is an
             * interruptible wait, we bail out immediately.  If not, we
             * continue on.
             */
            self->interruptingWait = false;
            mon->interrupting--;
            assert(self->interrupted);
            if (interruptShouldThrow) {
                wasInterrupted = true;
                LOGD("threadid=%d wakeup: interrupted\n", self->threadId);
                break;
            } else {
                LOGD("threadid=%d wakeup: not interruptible\n", self->threadId);
            }
        }
        if (mon->notifying) {
            /*
             * One or more threads are being notified.  Remove ourselves
             * from the set.
             */
            mon->notifying--;
            LOGVV("threadid=%d wakeup: notified\n", self->threadId);
            break;
        } else {
            /*
             * Looks like we were woken unnecessarily, probably as a
             * result of another thread being interrupted.  Go back to
             * sleep.
             */
            LOGVV("threadid=%d wakeup: going back to sleep\n", self->threadId);
        }
    }

done:
    //if (wasInterrupted) {
    //    LOGW("threadid=%d: throwing InterruptedException:\n", self->threadId);
    //    dvmDumpThread(self, false);
    //}

    /*
     * Put everything back.  Again, we hold the pthread mutex, so the order
     * here isn't significant.
     */
    self->waitMonitor = NULL;
    mon->owner = self;
    mon->waiting--;
    mon->lockCount = prevLockCount;

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptible thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow)
            dvmThrowException("Ljava/lang/InterruptedException;", NULL);
    }
}

/*
 * Notify one thread waiting on this monitor.
 */
static void notifyMonitor(Thread* self, Monitor* mon)
{
    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notify()");
        return;
    }

    /*
     * Check to see if anybody is there to notify.  We subtract off
     * threads that are being interrupted and anything that has
     * potentially already been notified.
     */
    if (mon->notifying + mon->interrupting < mon->waiting) {
        /* wake up one thread */
        int cc;

        LOGVV("threadid=%d: signaling on %p\n", self->threadId, mon);

        mon->notifying++;
        cc = pthread_cond_signal(&mon->cond);
        assert(cc == 0);
    } else {
        LOGVV("threadid=%d: nobody to signal on %p\n", self->threadId, mon);
    }
}

/*
 * Notify all threads waiting on this monitor.
 *
 * We keep a count of how many threads we notified, so that our various
 * counts remain accurate.
 */
static void notifyAllMonitor(Thread* self, Monitor* mon)
{
    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notifyAll()");
        return;
    }

    mon->notifying = mon->waiting - mon->interrupting;
    if (mon->notifying > 0) {
        int cc;

        LOGVV("threadid=%d: broadcasting to %d threads on %p\n",
            self->threadId, mon->notifying, mon);

        cc = pthread_cond_broadcast(&mon->cond);
        assert(cc == 0);
    } else {
        LOGVV("threadid=%d: nobody to broadcast to on %p\n",
            self->threadId, mon);
    }
}
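
/*
 * Illustrative example (added commentary, not part of the original
 * sources): the wakeup accounting in play.  Suppose four threads are
 * waiting (waiting == 4) and nobody has been notified or interrupted:
 *
 *     notify()     -> 0 + 0 < 4, notifying becomes 1, one signal sent
 *     notify()     -> 1 + 0 < 4, notifying becomes 2
 *     notify() x2  -> notifying reaches 4; further notify() calls are
 *                     no-ops until a waiter wakes up
 *
 * Each waiter that wakes on a notification decrements "notifying" in
 * waitMonitor(), keeping the counts consistent.
 */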

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp = &obj->lock.thin;
    u4 thin;
    u4 threadId = self->threadId;
    Monitor *mon;

    /* First, try to grab the lock as if it's thin;
     * this is the common case and will usually succeed.
     */
    thin = threadId << LW_LOCK_OWNER_SHIFT;
    thin |= *thinp & (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
    if (!ATOMIC_CMP_SWAP((int32_t *)thinp,
                         (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
                         (int32_t)thin)) {
        /* The lock is either a thin lock held by someone (possibly 'self'),
         * or a fat lock.
         */
        if (LW_LOCK_OWNER(*thinp) == threadId) {
            /* 'self' is already holding the thin lock; we can just
             * bump the count.  Atomic operations are not necessary
             * because only the thread holding the lock is allowed
             * to modify the Lock field.
             */
            *thinp += 1 << LW_LOCK_COUNT_SHIFT;
        } else {
            /* If this is a thin lock we need to spin on it, if it's fat
             * we need to acquire the monitor.
             */
            if (LW_SHAPE(*thinp) == LW_SHAPE_THIN) {
                ThreadStatus oldStatus;
                static const unsigned long maxSleepDelay = 1 * 1024 * 1024;
                unsigned long sleepDelay;

                LOG_THIN("(%d) spin on lock 0x%08x: 0x%08x (0x%08x) 0x%08x\n",
                         threadId, (uint)&obj->lock,
                         DVM_LOCK_INITIAL_THIN_VALUE, *thinp, thin);

                /* The lock is still thin, but some other thread is
                 * holding it.  Let the VM know that we're about
                 * to wait on another thread.
                 */
                oldStatus = dvmChangeStatus(self, THREAD_MONITOR);

                /* Spin until the other thread lets go.
                 */
                sleepDelay = 0;
                do {
                    /* In addition to looking for an unlock,
                     * we need to watch out for some other thread
                     * fattening the lock behind our back.
                     */
                    while (LW_LOCK_OWNER(*thinp) != 0) {
                        if (LW_SHAPE(*thinp) == LW_SHAPE_FAT) {
                            /* The lock has been fattened already.
                             */
                            LOG_THIN("(%d) lock 0x%08x surprise-fattened\n",
                                     threadId, (uint)&obj->lock);
                            dvmChangeStatus(self, oldStatus);
                            goto fat_lock;
                        }

                        if (sleepDelay == 0) {
                            sched_yield();
                            sleepDelay = 1 * 1000;
                        } else {
                            usleep(sleepDelay);
                            if (sleepDelay < maxSleepDelay / 2) {
                                sleepDelay *= 2;
                            }
                        }
                    }
                    thin = threadId << LW_LOCK_OWNER_SHIFT;
                    thin |= *thinp & (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
                } while (!ATOMIC_CMP_SWAP((int32_t *)thinp,
                                          (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
                                          (int32_t)thin));
                LOG_THIN("(%d) spin on lock done 0x%08x: "
                         "0x%08x (0x%08x) 0x%08x\n",
                         threadId, (uint)&obj->lock,
                         DVM_LOCK_INITIAL_THIN_VALUE, *thinp, thin);

                /* We've got the thin lock; let the VM know that we're
                 * done waiting.
                 */
                dvmChangeStatus(self, oldStatus);

                /* Fatten the lock.  Note this relinquishes ownership.
                 * We could also create the monitor in an "owned" state
                 * to avoid "re-locking" it in fat_lock.
                 */
                mon = dvmCreateMonitor(obj);
                obj->lock.thin = (u4)mon | LW_SHAPE_FAT;
                LOG_THIN("(%d) lock 0x%08x fattened\n",
                         threadId, (uint)&obj->lock);

                /* Fall through to acquire the newly fat lock.
                 */
            }

            /* The lock is already fat, which means
             * that obj->lock.mon is a regular (Monitor *).
             */
        fat_lock:
            assert(LW_MONITOR(obj->lock.thin) != NULL);
            lockMonitor(self, LW_MONITOR(obj->lock.thin));
        }
    }
    // else, the lock was acquired with the ATOMIC_CMP_SWAP().

#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * See if we were allowed to grab the lock at this time.  We do it
     * *after* acquiring the lock, rather than before, so that we can
     * freely update the Monitor struct.  This seems counter-intuitive,
     * but our goal is deadlock *prediction* not deadlock *prevention*.
     * (If we actually deadlock, the situation is easy to diagnose from
     * a thread dump, so there's no point making a special effort to do
     * the checks before the lock is held.)
     *
     * This needs to happen before we add the object to the thread's
     * monitor list, so we can tell the difference between first-lock and
     * re-lock.
     *
     * It's also important that we do this while in THREAD_RUNNING, so
     * that we don't interfere with cleanup operations in the GC.
     */
    if (gDvm.deadlockPredictMode != kDPOff) {
        if (self->status != THREAD_RUNNING) {
            LOGE("Bad thread status (%d) in DP\n", self->status);
            dvmDumpThread(self, false);
            dvmAbort();
        }
        assert(!dvmCheckException(self));
        updateDeadlockPrediction(self, obj);
        if (dvmCheckException(self)) {
            /*
             * If we're throwing an exception here, we need to free the
             * lock.  We add the object to the thread's monitor list so the
             * "unlock" code can remove it.
             */
            dvmAddToMonitorList(self, obj, false);
            dvmUnlockObject(self, obj);
            LOGV("--- unlocked, pending is '%s'\n",
                dvmGetException(self)->clazz->descriptor);
        }
    }

    /*
     * Add the locked object, and the current stack trace, to the list
     * held by the Thread object.  If deadlock prediction isn't on,
     * don't capture the stack trace.
     */
    dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
#elif defined(WITH_MONITOR_TRACKING)
    /*
     * Add the locked object to the list held by the Thread object.
     */
    dvmAddToMonitorList(self, obj, false);
#endif
}
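
/*
 * Illustrative sketch (added commentary, not part of the original
 * sources): a Java "synchronized" block compiles to a paired
 * monitor-enter/monitor-exit, which the interpreter services with the
 * function above and the one below:
 *
 *     dvmLockObject(self, obj);            // monitorenter
 *     ... body of the synchronized block ...
 *     if (!dvmUnlockObject(self, obj)) {   // monitorexit
 *         // IllegalMonitorStateException is pending
 *     }
 *
 * The backoff in the spin loop above first yields the CPU, then sleeps
 * 1ms, 2ms, 4ms, ..., with individual sleeps capped at roughly one
 * second (maxSleepDelay microseconds).
 */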

/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp = &obj->lock.thin;
    u4 threadId = self->threadId;
    u4 thin;

    /* Check the common case, where 'self' has locked 'obj' once, first.
     */
    thin = *thinp;
    if (LW_LOCK_OWNER(thin) == threadId && LW_LOCK_COUNT(thin) == 0) {
        /* Unlock 'obj' by clearing our threadId from 'thin'.
         * The lock protects the lock field itself, so it's
         * safe to update non-atomically.
         */
        *thinp &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
    } else if (LW_SHAPE(*thinp) == LW_SHAPE_THIN) {
        /* If the object is locked, it had better be locked by us.
         */
        if (LW_LOCK_OWNER(*thinp) != threadId) {
            /* The JNI spec says that we should throw an exception
             * in this case.
             */
            //LOGW("Unlock thin %p: id %d vs %d\n",
            //    obj, (*thinp & 0xfff), threadId);
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "unlock of unowned monitor");
            return false;
        }

        /* It's a thin lock, but 'self' has locked 'obj'
         * more than once.  Decrement the count.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
    } else {
        /* It's a fat lock.
         */
        assert(LW_MONITOR(obj->lock.thin) != NULL);
        if (!unlockMonitor(self, LW_MONITOR(obj->lock.thin))) {
            /* exception has been raised */
            return false;
        }
    }

#ifdef WITH_MONITOR_TRACKING
    /*
     * Remove the object from the Thread's list.
     */
    dvmRemoveFromMonitorList(self, obj);
#endif

    return true;
}

/*
 * Object.wait().  Also called for class init.
 */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon = LW_MONITOR(obj->lock.thin);
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, we need to fatten it.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before wait()");
            return;
        }

        /* This thread holds the lock.  We need to fatten the lock
         * so 'self' can block on it.  Don't update the object lock
         * field yet, because 'self' needs to acquire the lock before
         * any other thread gets a chance.
         */
        mon = dvmCreateMonitor(obj);

        /* 'self' has actually locked the object one or more times;
         * make sure that the monitor reflects this.
         */
        lockMonitor(self, mon);
        mon->lockCount = LW_LOCK_COUNT(thin);
        LOG_THIN("(%d) lock 0x%08x fattened by wait() to count %d\n",
                 self->threadId, (uint)&obj->lock, mon->lockCount);


        /* Make the monitor public now that it's in the right state.
         */
        MEM_BARRIER();
        obj->lock.thin = (u4)mon | LW_SHAPE_FAT;
    }

    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object *obj)
{
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notify()");
            return;
        }

        /* no-op;  there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notifyAll()");
            return;
        }

        /* no-op;  there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyAllMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * This implements java.lang.Thread.sleep(long msec, int nsec).
 *
 * The sleep is interruptible by other threads, which means we can't just
 * plop into an OS sleep call.  (We probably could if we wanted to send
 * signals around and rely on EINTR, but that's inefficient and relies
 * on native code respecting our signal mask.)
 *
 * We have to do all of this stuff for Object.wait() as well, so it's
 * easiest to just sleep on a private Monitor.
 *
 * It appears that we want sleep(0,0) to go through the motions of sleeping
 * for a very short duration, rather than just returning.
 */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* mon = gDvm.threadSleepMon;

    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
    if (msec == 0 && nsec == 0)
        nsec++;

    lockMonitor(self, mon);
    waitMonitor(self, mon, msec, nsec, true);
    unlockMonitor(self, mon);
}
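
/*
 * Note (added commentary): Thread.sleep(0) therefore turns into
 * waitMonitor(self, gDvm.threadSleepMon, 0, 1, true) -- a one-nanosecond
 * timed wait on the VM-private sleep monitor -- rather than a
 * wait-forever, since wait(0, 0) means "no timeout".
 */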

/*
 * Implement java.lang.Thread.interrupt().
 *
 * We need to increment the monitor's "interrupting" count, and set the
 * interrupted status for the thread in question.  Doing so requires
 * gaining the monitor's lock, which may not happen in a timely fashion.
 * We are left with a decision between failing to interrupt the thread
 * and stalling the interrupting thread.
 *
 * We must take some care to ensure that we don't try to interrupt the same
 * thread on the same mutex twice.  Doing so would leave us with an
 * incorrect value for Monitor.interrupting.
 */
void dvmThreadInterrupt(volatile Thread* thread)
{
    Monitor* mon;

    /*
     * Raise the "interrupted" flag.  This will cause the target thread to
     * bail early out of its next wait() attempt, if it's not currently
     * waiting on something.
     */
    thread->interrupted = true;
    MEM_BARRIER();

    /*
     * Is the thread waiting?
     *
     * Note that fat vs. thin doesn't matter here;  waitMonitor
     * is only set when a thread actually waits on a monitor,
     * which implies that the monitor has already been fattened.
     */
    mon = thread->waitMonitor;
    if (mon == NULL)
        return;

    /*
     * Try to acquire the monitor, if we don't already own it.  We need
     * to hold the same mutex as the thread in order to signal the
     * condition it's waiting on.  When the thread goes to sleep it will
     * release the monitor's mutex, allowing us to signal it.
     *
     * TODO: we may be able to get rid of the explicit lock by coordinating
     * this more closely with waitMonitor.
     */
    Thread* self = dvmThreadSelf();
    if (!tryLockMonitor(self, mon)) {
        /*
         * Failed to get the monitor the thread is waiting on; most likely
         * the other thread is in the middle of doing something.
         */
        const int kSpinSleepTime = 500*1000;        /* 0.5s */
        u8 startWhen = dvmGetRelativeTimeUsec();
        int sleepIter = 0;

        while (dvmIterativeSleep(sleepIter++, kSpinSleepTime, startWhen)) {
            /*
             * Still time left on the clock, try to grab it again.
             */
            if (tryLockMonitor(self, mon))
                goto gotit;

            /*
             * If the target thread is no longer waiting on the same monitor,
             * the "interrupted" flag we set earlier will have caused the
             * interrupt when the thread woke up, so we can stop now.
             */
            if (thread->waitMonitor != mon)
                return;
        }

        /*
         * We have to give up or risk deadlock.
         */
        LOGW("threadid=%d: unable to interrupt threadid=%d\n",
            self->threadId, thread->threadId);
        return;
    }

gotit:
    /*
     * We've got the monitor lock, which means nobody can be added or
     * removed from the wait list.  This also means that the Thread's
     * waitMonitor/interruptingWait fields can't be modified by anyone
     * else.
     *
     * If things look good, raise flags and wake the threads sleeping
     * on the monitor's condition variable.
     */
    if (thread->waitMonitor == mon &&       // still on same monitor?
        thread->interrupted &&              // interrupt still pending?
        !thread->interruptingWait)          // nobody else is interrupting too?
    {
        int cc;

        LOGVV("threadid=%d: interrupting threadid=%d waiting on %p\n",
            self->threadId, thread->threadId, mon);

        thread->interruptingWait = true;    // prevent re-interrupt...
        mon->interrupting++;                // ...so we only do this once
        cc = pthread_cond_broadcast(&mon->cond);
        assert(cc == 0);
    }

    unlockMonitor(self, mon);
}

u4 dvmIdentityHashCode(Object *obj)
{
    return (u4)obj;
}
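
/*
 * Note (added commentary): using the object's address as its identity
 * hash code is only stable because this collector does not move objects;
 * a compacting GC would need to store the hash elsewhere, e.g. in the
 * "hash state" bits of the lock word described at the top of this file.
 */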

#ifdef WITH_DEADLOCK_PREDICTION
/*
 * ===========================================================================
 *      Deadlock prediction
 * ===========================================================================
 */
/*
The idea is to predict the possibility of deadlock by recording the order
in which monitors are acquired.  If we see an attempt to acquire a lock
out of order, we can identify the locks and offending code.

To make this work, we need to keep track of the locks held by each thread,
and create history trees for each lock.  When a thread tries to acquire
a new lock, we walk through the "history children" of the lock, looking
for a match with locks the thread already holds.  If we find a match,
it means the thread has made a request that could result in a deadlock.

To support recursive locks, we always allow re-locking a currently-held
lock, and maintain a recursion depth count.

An ASCII-art example, where letters represent Objects:

        A
       /|\
      / | \
     B  |  D
      \ |
       \|
        C

The above is the tree we'd have after handling Object synchronization
sequences "ABC", "AC", "AD".  A has three children, {B, C, D}.  C is also
a child of B.  (The lines represent pointers between parent and child.
Every node can have multiple parents and multiple children.)

If we hold AC, and want to lock B, we recursively search through B's
children to see if A or C appears.  It does, so we reject the attempt.
(A straightforward way to implement it: add a link from C to B, then
determine whether the graph starting at B contains a cycle.)

If we hold AC and want to lock D, we would succeed, creating a new link
from C to D.

The lock history and a stack trace is attached to the Object's Monitor
struct, which means we need to fatten every Object we lock (thin locking
is effectively disabled).  If we don't need the stack trace we can
avoid fattening the leaf nodes, only fattening objects that need to hold
history trees.

Updates to Monitor structs are only allowed for the thread that holds
the Monitor, so we actually do most of our deadlock prediction work after
the lock has been acquired.

When an object with a monitor is GCed, we need to remove it from the
history trees.  There are two basic approaches:
 (1) Run through the entire set of known monitors, searching all child
     lists for the object in question.  This is rather slow, resulting
     in GC passes that take upwards of 10 seconds to complete.
 (2) Maintain "parent" pointers in each node.  Remove the entries as
     required.  This requires additional storage and maintenance for
     every operation, but is significantly faster at GC time.
For each GCed object, we merge all of the object's children into each of
the object's parents.
*/

#if !defined(WITH_MONITOR_TRACKING)
# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
#endif
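
/*
 * Illustrative example (added commentary, not part of the original
 * sources): the classic inversion this machinery flags.  Thread 1 runs
 *
 *     synchronized (a) { synchronized (b) { ... } }   // records a -> b
 *
 * and thread 2 later runs
 *
 *     synchronized (b) { synchronized (a) { ... } }
 *
 * When thread 2 acquires "a" while holding "b", updateDeadlockPrediction()
 * adds a tentative b -> a link and the traversal from "a" reaches "b" and
 * then "a" again -- a cycle -- so a potential deadlock is reported even if
 * the two threads never actually collide.
 */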

/*
 * Clear out the contents of an ExpandingObjectList, freeing any
 * dynamic allocations.
 */
static void expandObjClear(ExpandingObjectList* pList)
{
    if (pList->list != NULL) {
        free(pList->list);
        pList->list = NULL;
    }
    pList->alloc = pList->count = 0;
}

/*
 * Get the number of objects currently stored in the list.
 */
static inline int expandBufGetCount(const ExpandingObjectList* pList)
{
    return pList->count;
}

/*
 * Get the Nth entry from the list.
 */
static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
    int i)
{
    return pList->list[i];
}

/*
 * Add a new entry to the list.
 *
 * We don't check for or try to enforce uniqueness.  It's expected that
 * the higher-level code does this for us.
 */
static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
{
    if (pList->count == pList->alloc) {
        /* time to expand */
        Object** newList;

        if (pList->alloc == 0)
            pList->alloc = 4;
        else
            pList->alloc *= 2;
        LOGVV("expanding %p to %d\n", pList, pList->alloc);
        newList = (Object**) realloc(pList->list,
                    pList->alloc * sizeof(Object*));
        if (newList == NULL) {
            LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
            dvmAbort();
        }
        pList->list = newList;
    }

    pList->list[pList->count++] = obj;
}

/*
 * Returns "true" if the element was successfully removed.
 */
static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
{
    int i;

    for (i = pList->count-1; i >= 0; i--) {
        if (pList->list[i] == obj)
            break;
    }
    if (i < 0)
        return false;

    if (i != pList->count-1) {
        /*
         * The order of elements is not important, so we just copy the
         * last entry into the vacated slot.
         */
        //memmove(&pList->list[i], &pList->list[i+1],
        //    (pList->count-1 - i) * sizeof(pList->list[0]));
        pList->list[i] = pList->list[pList->count-1];
    }

    pList->count--;
    pList->list[pList->count] = (Object*) 0xdecadead;
    return true;
}

/*
 * Returns "true" if "obj" appears in the list.
 */
static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
{
    int i;

    for (i = 0; i < pList->count; i++) {
        if (pList->list[i] == obj)
            return true;
    }
    return false;
}

/*
 * Print the list contents to stdout.  For debugging.
 */
static void expandObjDump(const ExpandingObjectList* pList)
{
    int i;
    for (i = 0; i < pList->count; i++)
        printf(" %p", pList->list[i]);
}

/*
 * Check for duplicate entries.  Returns the index of the first instance
 * of the duplicated value, or -1 if no duplicates were found.
 */
static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
{
    int i, j;
    for (i = 0; i < pList->count-1; i++) {
        for (j = i + 1; j < pList->count; j++) {
            if (pList->list[i] == pList->list[j]) {
                return i;
            }
        }
    }

    return -1;
}
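
/*
 * Illustrative usage (added commentary, not part of the original
 * sources): the helpers above compose into the obvious add/query/remove
 * pattern, with removal swapping the last element into the hole (order
 * is not preserved):
 *
 *     ExpandingObjectList list;
 *     memset(&list, 0, sizeof(list));
 *     expandObjAddEntry(&list, objA);       // first add grows to 4 slots
 *     expandObjAddEntry(&list, objB);
 *     assert(expandObjHas(&list, objA));
 *     expandObjRemoveEntry(&list, objA);    // objB now occupies slot 0
 *     expandObjClear(&list);                // frees the backing array
 */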


/*
 * Determine whether "child" appears in the list of objects associated
 * with the Monitor in "parent".  If "parent" is a thin lock, we return
 * false immediately.
 */
static bool objectInChildList(const Object* parent, Object* child)
{
    Lock lock = parent->lock;
    if (!IS_LOCK_FAT(&lock)) {
        //LOGI("on thin\n");
        return false;
    }

    return expandObjHas(&lock.mon->historyChildren, child);
}

/*
 * Print the child list.
 */
static void dumpKids(Object* parent)
{
    Monitor* mon = parent->lock.mon;

    printf("Children of %p:", parent);
    expandObjDump(&mon->historyChildren);
    printf("\n");
}

/*
 * Add "child" to the list of children in "parent", and add "parent" to
 * the list of parents in "child".
 */
static void linkParentToChild(Object* parent, Object* child)
{
    //assert(parent->lock.mon->owner == dvmThreadSelf());   // !owned for merge
    assert(IS_LOCK_FAT(&parent->lock));
    assert(IS_LOCK_FAT(&child->lock));
    assert(parent != child);
    Monitor* mon;

    mon = parent->lock.mon;
    assert(!expandObjHas(&mon->historyChildren, child));
    expandObjAddEntry(&mon->historyChildren, child);

    mon = child->lock.mon;
    assert(!expandObjHas(&mon->historyParents, parent));
    expandObjAddEntry(&mon->historyParents, parent);
}


/*
 * Remove "child" from the list of children in "parent".
 */
static void unlinkParentFromChild(Object* parent, Object* child)
{
    //assert(parent->lock.mon->owner == dvmThreadSelf());   // !owned for GC
    assert(IS_LOCK_FAT(&parent->lock));
    assert(IS_LOCK_FAT(&child->lock));
    assert(parent != child);
    Monitor* mon;

    mon = parent->lock.mon;
    if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
        LOGW("WARNING: child %p not found in parent %p\n", child, parent);
    }
    assert(!expandObjHas(&mon->historyChildren, child));
    assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);

    mon = child->lock.mon;
    if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
        LOGW("WARNING: parent %p not found in child %p\n", parent, child);
    }
    assert(!expandObjHas(&mon->historyParents, parent));
    assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
}


/*
 * Log the monitors held by the current thread.  This is done as part of
 * flagging an error.
 */
static void logHeldMonitors(Thread* self)
{
    char* name = NULL;

    name = dvmGetThreadName(self);
    LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
        self->threadId, name);
    LOGW("(most-recently-acquired on top):\n");
    free(name);

    LockedObjectData* lod = self->pLockedObjects;
    while (lod != NULL) {
        LOGW("--- object %p[%d] (%s)\n",
            lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
        dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);

        lod = lod->next;
    }
}

/*
 * Recursively traverse the object hierarchy starting at "obj".  We mark
 * ourselves on entry and clear the mark on exit.  If we ever encounter
 * a marked object, we have a cycle.
 *
 * Returns "true" if all is well, "false" if we found a cycle.
 */
static bool traverseTree(Thread* self, const Object* obj)
{
    assert(IS_LOCK_FAT(&obj->lock));
    Monitor* mon = obj->lock.mon;

    /*
     * Have we been here before?
     */
    if (mon->historyMark) {
        int* rawStackTrace;
        int stackDepth;

        LOGW("%s\n", kStartBanner);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);

        rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
        dvmLogRawStackTrace(rawStackTrace, stackDepth);
        free(rawStackTrace);

        LOGW(" ");
        logHeldMonitors(self);

        LOGW(" ");
        LOGW("Earlier, the following lock order (from last to first) was\n");
        LOGW("established -- stack trace is from the first successful lock:\n");
        return false;
    }
    mon->historyMark = true;

    /*
     * Examine the children.  We do NOT hold these locks, so they might
     * very well transition from thin to fat or change ownership while
     * we work.
     *
     * NOTE: we rely on the fact that they cannot revert from fat to thin
     * while we work.  This is currently a safe assumption.
     *
     * We can safely ignore thin-locked children, because by definition
     * they have no history and are leaf nodes.  In the current
     * implementation we always fatten the locks to provide a place to
     * hang the stack trace.
     */
    ExpandingObjectList* pList = &mon->historyChildren;
    int i;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        const Object* child = expandBufGetEntry(pList, i);
        Lock lock = child->lock;
        if (!IS_LOCK_FAT(&lock))
            continue;
        if (!traverseTree(self, child)) {
            LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
            dvmLogRawStackTrace(mon->historyRawStackTrace,
                mon->historyStackDepth);
            mon->historyMark = false;
            return false;
        }
    }

    mon->historyMark = false;

    return true;
}

/*
 * Update the deadlock prediction tree, based on the current thread
 * acquiring "acqObj".  This must be called before the object is added to
 * the thread's list of held monitors.
 *
 * If the thread already holds the lock (recursion), or this is a known
 * lock configuration, we return without doing anything.  Otherwise, we add
 * a link from the most-recently-acquired lock in this thread to "acqObj"
 * after ensuring that the parent lock is "fat".
 *
 * This MUST NOT be called while a GC is in progress in another thread,
 * because we assume exclusive access to history trees in owned monitors.
 */
static void updateDeadlockPrediction(Thread* self, Object* acqObj)
{
    LockedObjectData* lod;
    LockedObjectData* mrl;

    /*
     * Quick check for recursive access.
     */
    lod = dvmFindInMonitorList(self, acqObj);
    if (lod != NULL) {
        LOGV("+++ DP: recursive %p\n", acqObj);
        return;
    }

    /*
     * Make the newly-acquired object's monitor "fat".  In some ways this
     * isn't strictly necessary, but we need the GC to tell us when
     * "interesting" objects go away, and right now the only way to make
     * an object look interesting is to give it a monitor.
     *
     * This also gives us a place to hang a stack trace.
     *
     * Our thread holds the lock, so we're allowed to rewrite the lock
     * without worrying that something will change out from under us.
     */
    if (!IS_LOCK_FAT(&acqObj->lock)) {
        LOGVV("fattening lockee %p (recur=%d)\n",
            acqObj, LW_LOCK_COUNT(acqObj->lock.thin));
        Monitor* newMon = dvmCreateMonitor(acqObj);
        lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
        newMon->lockCount += LW_LOCK_COUNT(acqObj->lock.thin);
        acqObj->lock.mon = newMon;
    }

    /* if we don't have a stack trace for this monitor, establish one */
    if (acqObj->lock.mon->historyRawStackTrace == NULL) {
        Monitor* mon = acqObj->lock.mon;
        mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
            &mon->historyStackDepth);
    }

    /*
     * We need to examine and perhaps modify the most-recently-locked
     * monitor.  We own that, so there's no risk of another thread
     * stepping on us.
     *
     * Retrieve the most-recently-locked entry from our thread.
     */
    mrl = self->pLockedObjects;
    if (mrl == NULL)
        return;         /* no other locks held */

    /*
     * Do a quick check to see if "acqObj" is a direct descendant.  We can do
     * this without holding the global lock because of our assertion that
     * a GC is not running in parallel -- nobody except the GC can
     * modify a history list in a Monitor they don't own, and we own "mrl".
     * (There might be concurrent *reads*, but no concurrent *writes*.)
     *
     * If we find it, this is a known good configuration, and we're done.
     */
    if (objectInChildList(mrl->obj, acqObj))
        return;

    /*
     * "mrl" is going to need to have a history tree.  If it's currently
     * a thin lock, we make it fat now.  The thin lock might have a
     * nonzero recursive lock count, which we need to carry over.
     *
     * Our thread holds the lock, so we're allowed to rewrite the lock
     * without worrying that something will change out from under us.
     */
    if (!IS_LOCK_FAT(&mrl->obj->lock)) {
        LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
            mrl->obj, acqObj, LW_LOCK_COUNT(mrl->obj->lock.thin));
        Monitor* newMon = dvmCreateMonitor(mrl->obj);
        lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
        newMon->lockCount += LW_LOCK_COUNT(mrl->obj->lock.thin);
        mrl->obj->lock.mon = newMon;
    }

    /*
     * We haven't seen this configuration before.  We need to scan down
     * acqObj's tree to see if any of the monitors in self->pLockedObjects
     * appear.  We grab a global lock before traversing or updating the
     * history list.
     *
     * If we find a match for any of our held locks, we know that the lock
     * has previously been acquired *after* acqObj, and we throw an error.
     *
     * The easiest way to do this is to create a link from "mrl" to "acqObj"
     * and do a recursive traversal, marking nodes as we cross them.  If
     * we cross one a second time, we have a cycle and can throw an error.
     * (We do the flag-clearing traversal before adding the new link, so
     * that we're guaranteed to terminate.)
     *
     * If "acqObj" is a thin lock, it has no history, and we can create a
     * link to it without additional checks.  [ We now guarantee that it's
     * always fat. ]
     */
    bool failed = false;
    dvmLockMutex(&gDvm.deadlockHistoryLock);
    linkParentToChild(mrl->obj, acqObj);
    if (!traverseTree(self, acqObj)) {
        LOGW("%s\n", kEndBanner);
        failed = true;

        /* remove the entry so we're still okay when in "warning" mode */
        unlinkParentFromChild(mrl->obj, acqObj);
    }
    dvmUnlockMutex(&gDvm.deadlockHistoryLock);

    if (failed) {
        switch (gDvm.deadlockPredictMode) {
        case kDPErr:
            dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
            break;
        case kDPAbort:
            LOGE("Aborting due to potential deadlock\n");
            dvmAbort();
            break;
        default:
            /* warn only */
            break;
        }
    }
}

/*
 * We're removing "child" from existence.  We want to pull all of
 * child's children into "parent", filtering out duplicates.  This is
 * called during the GC.
 *
 * This does not modify "child", which might have multiple parents.
 */
static void mergeChildren(Object* parent, const Object* child)
{
    Monitor* mon;
    int i;

    assert(IS_LOCK_FAT(&child->lock));
    mon = child->lock.mon;
    ExpandingObjectList* pList = &mon->historyChildren;

    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* grandChild = expandBufGetEntry(pList, i);

        if (!objectInChildList(parent, grandChild)) {
            LOGVV("+++ migrating %p link to %p\n", grandChild, parent);
            linkParentToChild(parent, grandChild);
        } else {
            LOGVV("+++ parent %p already links to %p\n", parent, grandChild);
        }
    }
}

/*
 * An object with a fat lock is being collected during a GC pass.  We
 * want to remove it from any lock history trees that it is a part of.
 *
 * This may require updating the history trees in several monitors.  The
 * monitor semantics guarantee that no other thread will be accessing
 * the history trees at the same time.
 */
static void removeCollectedObject(Object* obj)
{
    Monitor* mon;

    LOGVV("+++ collecting %p\n", obj);

#if 0
    /*
     * We're currently running through the entire set of known monitors.
     * This can be somewhat slow.  We may want to keep lists of parents
     * in each child to speed up GC.
     */
    mon = gDvm.monitorList;
    while (mon != NULL) {
        Object* parent = mon->obj;
        if (parent != NULL) {       /* value nulled for deleted entries */
            if (objectInChildList(parent, obj)) {
                LOGVV("removing child %p from parent %p\n", obj, parent);
                unlinkParentFromChild(parent, obj);
                mergeChildren(parent, obj);
            }
        }
        mon = mon->next;
    }
#endif

    /*
     * For every parent of this object:
     *  - merge all of our children into the parent's child list (creates
     *    a two-way link between parent and child)
     *  - remove ourselves from the parent's child list
     */
    ExpandingObjectList* pList;
    int i;

    assert(IS_LOCK_FAT(&obj->lock));
    mon = obj->lock.mon;
    pList = &mon->historyParents;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* parent = expandBufGetEntry(pList, i);
        Monitor* parentMon = parent->lock.mon;

        if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
            LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
        }
        assert(!expandObjHas(&parentMon->historyChildren, obj));

        mergeChildren(parent, obj);
    }

    /*
     * For every child of this object:
     *  - remove ourselves from the child's parent list
     */
    pList = &mon->historyChildren;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* child = expandBufGetEntry(pList, i);
        Monitor* childMon = child->lock.mon;

        if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
            LOGW("WARNING: parent %p not found in child %p\n", obj, child);
        }
        assert(!expandObjHas(&childMon->historyParents, obj));
    }
}

#endif  /*WITH_DEADLOCK_PREDICTION*/