Heap.cpp revision e6c0ef210ee6c62cf4c63d50c04f451d5fa505f5
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"
#include "os/os.h"

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

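/* Predefined collection specifications.  Each GcSpec records the reason
 * string reported in the GC log and selects whether the collection is
 * partial, whether it runs concurrently with the mutator threads, and
 * whether soft references are preserved (doPreserve == false clears them).
 */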
static const GcSpec kGcForMallocSpec = {
    true,  /* isPartial */
    false,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_FOR_ALLOC"
};

const GcSpec *GC_FOR_MALLOC = &kGcForMallocSpec;

static const GcSpec kGcConcurrentSpec  = {
    true,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_CONCURRENT"
};

const GcSpec *GC_CONCURRENT = &kGcConcurrentSpec;

static const GcSpec kGcExplicitSpec = {
    false,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_EXPLICIT"
};

const GcSpec *GC_EXPLICIT = &kGcExplicitSpec;

static const GcSpec kGcBeforeOomSpec = {
    false,  /* isPartial */
    false,  /* isConcurrent */
    false,  /* doPreserve */
    "GC_BEFORE_OOM"
};

const GcSpec *GC_BEFORE_OOM = &kGcBeforeOomSpec;

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists we'll use for cleared reference objects.
     */
    gcHeap->clearedReferences = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    return true;
}

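/*
 * Perform any heap setup deferred until after the zygote; simply
 * forwards to the heap source.
 */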
bool dvmHeapStartupAfterZygote()
{
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();
        /* Destroy the heap.  Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it).  This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shut down any threads internal to the heap.
 */
void dvmHeapThreadShutdown()
{
    dvmHeapSourceThreadShutdown();
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

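/*
 * Release the heap lock taken by dvmLockHeap().
 */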
void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    const GcSpec *spec = clearSoftReferences ? GC_BEFORE_OOM : GC_FOR_MALLOC;
    dvmCollectGarbageInternal(spec);
}

175
176/* Try as hard as possible to allocate some memory.
177 */
178static void *tryMalloc(size_t size)
179{
180    void *ptr;
181
182    /* Don't try too hard if there's no way the allocation is
183     * going to succeed.  We have to collect SoftReferences before
184     * throwing an OOME, though.
185     */
186    if (size >= gDvm.heapGrowthLimit) {
187        LOGW("%zd byte allocation exceeds the %zd byte maximum heap size",
188             size, gDvm.heapGrowthLimit);
189        ptr = NULL;
190        goto collect_soft_refs;
191    }
192
193//TODO: figure out better heuristics
194//    There will be a lot of churn if someone allocates a bunch of
195//    big objects in a row, and we hit the frag case each time.
196//    A full GC for each.
197//    Maybe we grow the heap in bigger leaps
198//    Maybe we skip the GC if the size is large and we did one recently
199//      (number of allocations ago) (watch for thread effects)
200//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
201//      (or, at least, there are only 0-5 objects swept each time)
202
203    ptr = dvmHeapSourceAlloc(size);
204    if (ptr != NULL) {
205        return ptr;
206    }
207
    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }
    /*
     * Another failure.  Our thread was starved or there may be too
     * many live objects.  Try a foreground GC.  This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

274
275/* Throw an OutOfMemoryError if there's a thread to attach it to.
276 * Avoid recursing.
277 *
278 * The caller must not be holding the heap lock, or else the allocations
279 * in dvmThrowException() will deadlock.
280 */
281static void throwOOME()
282{
283    Thread *self;
284
285    if ((self = dvmThreadSelf()) != NULL) {
286        /* If the current (failing) dvmMalloc() happened as part of thread
287         * creation/attachment before the thread became part of the root set,
288         * we can't rely on the thread-local trackedAlloc table, so
289         * we can't keep track of a real allocated OOME object.  But, since
290         * the thread is in the process of being created, it won't have
291         * a useful stack anyway, so we may as well make things easier
292         * by throwing the (stackless) pre-built OOME.
293         */
294        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
295            /* Let ourselves know that we tried to throw an OOM
296             * error in the normal way in case we run out of
297             * memory trying to allocate it inside dvmThrowException().
298             */
299            self->throwingOOME = true;
300
301            /* Don't include a description string;
302             * one fewer allocation.
303             */
304            dvmThrowOutOfMemoryError(NULL);
305        } else {
306            /*
307             * This thread has already tried to throw an OutOfMemoryError,
308             * which probably means that we're running out of memory
309             * while recursively trying to throw.
310             *
311             * To avoid any more allocation attempts, "throw" a pre-built
312             * OutOfMemoryError object (which won't have a useful stack trace).
313             *
314             * Note that since this call can't possibly allocate anything,
315             * we don't care about the state of self->throwingOOME
316             * (which will usually already be set).
317             */
318            dvmSetException(self, gDvm.outOfMemoryObj);
319        }
320        /* We're done with the possible recursion.
321         */
322        self->throwingOOME = false;
323    }
324}
325
326/*
327 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
328 *
329 * The new storage is zeroed out.
330 *
331 * Note that, in rare cases, this could get called while a GC is in
332 * progress.  If a non-VM thread tries to attach itself through JNI,
333 * it will need to allocate some objects.  If this becomes annoying to
334 * deal with, we can block it at the source, but holding the allocation
335 * mutex should be enough.
336 *
337 * In rare circumstances (JNI AttachCurrentThread) we can be called
338 * from a non-VM thread.
339 *
340 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
341 * (because it's being done for the interpreter "new" operation and will
342 * be part of the root set immediately) or we can't (because this allocation
343 * is for a brand new thread).
344 *
345 * Returns NULL and throws an exception on failure.
346 *
347 * TODO: don't do a GC if the debugger thinks all threads are suspended
348 */
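/* A minimal usage sketch (hypothetical caller, not part of this file;
 * ALLOC_DEFAULT and dvmReleaseTrackedAlloc() are assumed from the
 * allocator's public interface).  With default flags the result is added
 * to the tracked-allocation table, so the caller releases it once the
 * object is reachable from a root:
 *
 *     Object* obj = (Object*)dvmMalloc(objectSize, ALLOC_DEFAULT);
 *     if (obj == NULL) {
 *         return NULL;    // an OutOfMemoryError has already been thrown
 *     }
 *     // ... initialize the object ...
 *     dvmReleaseTrackedAlloc(obj, NULL);   // assumed counterpart to
 *                                          // dvmAddTrackedAlloc() below
 */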
void* dvmMalloc(size_t size, int flags)
{
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}

404
405/*
406 * Returns true iff <obj> points to a valid allocated object.
407 */
408bool dvmIsValidObject(const Object* obj)
409{
410    /* Don't bother if it's NULL or not 8-byte aligned.
411     */
412    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
413        /* Even if the heap isn't locked, this shouldn't return
414         * any false negatives.  The only mutation that could
415         * be happening is allocation, which means that another
416         * thread could be in the middle of a read-modify-write
417         * to add a new bit for a new object.  However, that
418         * RMW will have completed by the time any other thread
419         * could possibly see the new pointer, so there is no
420         * danger of dvmIsValidObject() being called on a valid
421         * pointer whose bit isn't set.
422         *
423         * Freeing will only happen during the sweep phase, which
424         * only happens while the heap is locked.
425         */
426        return dvmHeapSourceContains(obj);
427    }
428    return false;
429}
430
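/*
 * Returns the number of bytes the heap source has reserved for <obj>,
 * i.e. the size of its underlying chunk.
 */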
size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

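/*
 * Sanity-check the root set and the live bitmap.  Called before and/or
 * after a collection when pre- or post-GC verification is enabled.
 */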
static void verifyRootsAndHeap()
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 rootSuspend, rootSuspendTime, rootStart = 0, rootEnd = 0;
    u4 dirtySuspend, dirtyStart = 0, dirtyEnd = 0;
    u4 totalTime;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootSuspend = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);
    rootStart = dvmGetRelativeTimeMsec();
    rootSuspendTime = rootStart - rootSuspend;

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
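    /* Reset the card table.  During a concurrent collection, cards dirtied
     * by mutators after this point are re-scanned at the remark pause below.
     */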
    dvmClearCardTable();
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        rootEnd = dvmGetRelativeTimeMsec();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects(spec->isPartial);

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dvmLockHeap();
        dirtySuspend = dvmGetRelativeTimeMsec();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        dirtyStart = dvmGetRelativeTimeMsec();
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

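    /* Clear system-level weak references (e.g. weakly interned strings)
     * whose referents were not marked.
     */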
    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap; swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        totalTime = rootSuspendTime + markSweepTime;
        LOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtySuspendTime = dirtyStart - dirtySuspend;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        totalTime = rootSuspendTime + rootTime + dirtySuspendTime + dirtyTime;
        LOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}

/*
 * If the concurrent GC is running, wait for it to finish.  The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend.  In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory.  We can avoid this situation by releasing the lock
 * before self-suspending.  (The developer can work around this specific
 * situation by single-stepping the VM.  Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all.  (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
void dvmWaitForConcurrentGcToComplete()
{
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
}
729