Heap.cpp revision 60fc806b679a3655c228b4093058c59941a49cfe
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"

#include "utils/threads.h"      // need Android thread priorities

#include <cutils/sched_policy.h>

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

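/*
 * Predefined GC specifications.  Each records whether the collection is
 * partial (application heap only), whether it runs concurrently with
 * mutator threads, whether softly-reachable objects are preserved, and
 * the reason string reported in GC log messages.
 */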
static const GcSpec kGcForMallocSpec = {
    true,  /* isPartial */
    false,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_FOR_ALLOC"
};

const GcSpec *GC_FOR_MALLOC = &kGcForMallocSpec;

static const GcSpec kGcConcurrentSpec = {
    true,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_CONCURRENT"
};

const GcSpec *GC_CONCURRENT = &kGcConcurrentSpec;

static const GcSpec kGcExplicitSpec = {
    false,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_EXPLICIT"
};

const GcSpec *GC_EXPLICIT = &kGcExplicitSpec;

static const GcSpec kGcBeforeOomSpec = {
    false,  /* isPartial */
    false,  /* isConcurrent */
    false,  /* doPreserve */
    "GC_BEFORE_OOM"
};

const GcSpec *GC_BEFORE_OOM = &kGcBeforeOomSpec;

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists we'll use for cleared reference objects.
     */
    gcHeap->clearedReferences = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    return true;
}

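/*
 * Performs any heap source initialization that is deferred until after
 * the zygote has started.
 */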
bool dvmHeapStartupAfterZygote()
{
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();
        /* Destroy the heap.  Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it).  This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shutdown any threads internal to the heap.
 */
void dvmHeapThreadShutdown()
{
    dvmHeapSourceThreadShutdown();
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

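/*
 * Release the heap lock.
 */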
void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    const GcSpec *spec = clearSoftReferences ? GC_BEFORE_OOM : GC_FOR_MALLOC;
    dvmCollectGarbageInternal(spec);
}

/* Try as hard as possible to allocate some memory.
 */
static void *tryMalloc(size_t size)
{
    void *ptr;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapGrowthLimit) {
        LOGW("%zd byte allocation exceeds the %zd byte maximum heap size",
             size, gDvm.heapGrowthLimit);
        ptr = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }
    /*
     * Another failure.  Our thread was starved or there may be too
     * many live objects.  Try a foreground GC.  This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation\n",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowOutOfMemoryError(NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress.  If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects.  If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}

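/*
 * Returns the number of usable bytes in the heap chunk backing <obj>;
 * this may be larger than the size originally requested.
 */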
size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

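/*
 * Verifies the roots and the heap's live bitmap.  Used as a sanity check
 * before and after collections when pre-/post-GC verification is enabled.
 */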
static void verifyRootsAndHeap()
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Raises the scheduling priority of the current thread.  Returns the
 * original priority if successful.  Otherwise, returns INT_MAX on
 * failure.
 */
static int raiseThreadPriority()
{
    /* Get the priority (the "nice" value) of the current thread.  The
     * getpriority() call can legitimately return -1, so we have to
     * explicitly test errno.
     */
    errno = 0;
    int oldThreadPriority = getpriority(PRIO_PROCESS, 0);
    if (errno != 0) {
        LOGI_HEAP("getpriority(self) failed: %s", strerror(errno));
    } else if (oldThreadPriority > ANDROID_PRIORITY_NORMAL) {
        /* Current value is numerically greater than "normal", which
         * in backward UNIX terms means lower priority.
         */
        if (oldThreadPriority >= ANDROID_PRIORITY_BACKGROUND) {
            set_sched_policy(dvmGetSysThreadId(), SP_FOREGROUND);
        }
        if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
            LOGI_HEAP("Unable to elevate priority from %d to %d",
                      oldThreadPriority, ANDROID_PRIORITY_NORMAL);
        } else {
            /*
             * The priority has been elevated.  Return the old value
             * so the caller can restore it later.
             */
            LOGD_HEAP("Elevating priority from %d to %d",
                      oldThreadPriority, ANDROID_PRIORITY_NORMAL);
            return oldThreadPriority;
        }
    }
    return INT_MAX;
}

/*
 * Sets the current thread scheduling priority.
 */
static void setThreadPriority(int newThreadPriority)
{
    if (setpriority(PRIO_PROCESS, 0, newThreadPriority) != 0) {
        LOGW_HEAP("Unable to reset priority to %d: %s",
                  newThreadPriority, strerror(errno));
    } else {
        LOGD_HEAP("Reset priority to %d", newThreadPriority);
    }
    if (newThreadPriority >= ANDROID_PRIORITY_BACKGROUND) {
        set_sched_policy(dvmGetSysThreadId(), SP_BACKGROUND);
    }
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 rootSuspend, rootSuspendTime, rootStart = 0, rootEnd = 0;
    u4 dirtySuspend, dirtyStart = 0, dirtyEnd = 0;
    u4 totalTime;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

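    /* Suspend all threads for the initial root marking, and record how
     * long the world was stopped.
     */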
    rootSuspend = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);
    rootStart = dvmGetRelativeTimeMsec();
    rootSuspendTime = rootStart - rootSuspend;

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

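    /* Note the start of the collection in any active method trace. */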
    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmClearCardTable();
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        rootEnd = dvmGetRelativeTimeMsec();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects(spec->isPartial);

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dvmLockHeap();
        dirtySuspend = dvmGetRelativeTimeMsec();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        dirtyStart = dvmGetRelativeTimeMsec();
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

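    /* Clear weak references held by internal VM tables (e.g. interned
     * strings) whose referents were not marked.
     */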
    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            setThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        totalTime = rootSuspendTime + markSweepTime;
        LOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtySuspendTime = dirtyStart - dirtySuspend;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        totalTime = rootSuspendTime + rootTime + dirtySuspendTime + dirtyTime;
        LOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}

/*
 * If the concurrent GC is running, wait for it to finish.  The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend.  In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory.  We can avoid this situation by releasing the lock
 * before self-suspending.  (The developer can work around this specific
 * situation by single-stepping the VM.  Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all.  (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
void dvmWaitForConcurrentGcToComplete()
{
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
}
786