Heap.cpp revision 5617ad30c611f373e16bf10c0feec114faef54ef
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/HeapTable.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"

#include "utils/threads.h"      // need Android thread priorities
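/* Sentinel meaning "no saved thread priority"; deliberately outside the
 * valid UNIX nice range of -20..19.
 */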
#define kInvalidPriority        10000

#include <cutils/sched_policy.h>

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

static const char* GcReasonStr[] = {
    [GC_FOR_MALLOC] = "GC_FOR_MALLOC",
    [GC_CONCURRENT] = "GC_CONCURRENT",
    [GC_EXPLICIT] = "GC_EXPLICIT",
    [GC_EXTERNAL_ALLOC] = "GC_EXTERNAL_ALLOC",
    [GC_HPROF_DUMP_HEAP] = "GC_HPROF_DUMP_HEAP"
};
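/* The designated initializers above are a C99/GNU extension; they keep
 * each string paired with its GcReason enum value even if the enum
 * ordering changes.
 */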

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

#if defined(WITH_ALLOC_LIMITS)
    gDvm.checkAllocLimits = false;
    gDvm.allocationLimit = -1;
#endif

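    /* heapSizeStart and heapSizeMax are configured at VM startup
     * (the -Xms/-Xmx options).
     */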
    gcHeap = dvmHeapSourceStartup(gDvm.heapSizeStart, gDvm.heapSizeMax);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->heapWorkerCurrentObject = NULL;
    gcHeap->heapWorkerCurrentMethod = NULL;
    gcHeap->heapWorkerInterpStartTime = 0LL;
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
#if WITH_HPROF
    gcHeap->hprofDumpOnGc = false;
    gcHeap->hprofContext = NULL;
#endif
    gDvm.gcHeap = gcHeap;

    /* Set up the lists and lock we'll use for finalizable
     * and reference objects.
     */
    dvmInitMutex(&gDvm.heapWorkerListLock);
    gcHeap->finalizableRefs = NULL;
    gcHeap->pendingFinalizationRefs = NULL;
    gcHeap->referenceOperations = NULL;

    /* Initialize the HeapWorker locks and other state
     * that the GC uses.
     */
    dvmInitializeHeapWorkerState();

    return true;
}

bool dvmHeapStartupAfterZygote(void)
{
    /* Update our idea of the last GC start time so that we
     * don't use the last time that Zygote happened to GC.
     */
    gDvm.gcHeap->gcStartTime = dvmGetRelativeTimeUsec();
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        /* Tables are allocated on the native heap;
         * they need to be cleaned up explicitly.
         * The process may stick around, so we don't
         * want to leak any native memory.
         */
        dvmHeapFreeLargeTable(gDvm.gcHeap->finalizableRefs);
        gDvm.gcHeap->finalizableRefs = NULL;

        dvmHeapFreeLargeTable(gDvm.gcHeap->pendingFinalizationRefs);
        gDvm.gcHeap->pendingFinalizationRefs = NULL;

        dvmHeapFreeLargeTable(gDvm.gcHeap->referenceOperations);
        gDvm.gcHeap->referenceOperations = NULL;

        /* Destroy the heap.  Any outstanding pointers
         * will point to unmapped memory (unless/until
         * someone else maps it).  This frees gDvm.gcHeap
         * as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shutdown any threads internal to the heap.
 */
void dvmHeapThreadShutdown(void)
{
    dvmHeapSourceThreadShutdown();
}

/*
 * We've been asked to allocate something we can't, e.g. an array so
 * large that (length * elementWidth) is larger than 2^31.
 *
 * _The Java Programming Language_, 4th edition, says, "you can be sure
 * that all SoftReferences to softly reachable objects will be cleared
 * before an OutOfMemoryError is thrown."
 *
 * It's unclear whether that holds for all situations where an OOM can
 * be thrown, or just in the context of an allocation that fails due
 * to lack of heap space.  For simplicity we just throw the exception.
 *
 * (OOM due to actually running out of space is handled elsewhere.)
 */
void dvmThrowBadAllocException(const char* msg)
{
    dvmThrowException("Ljava/lang/OutOfMemoryError;", msg);
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
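    /* Fast path: an uncontended try-lock avoids a thread state
     * transition.  If we would block, announce THREAD_VMWAIT so that a
     * suspend-all (e.g. for GC) doesn't have to wait on this thread.
     */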
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

/* Pop an object from the list of pending finalizations and
 * reference clears/enqueues, and return the object.
 * The caller must call dvmReleaseTrackedAlloc()
 * on the object when finished.
 *
 * Typically only called by the heap worker thread.
 */
Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
{
    Object *obj;
    GcHeap *gcHeap = gDvm.gcHeap;

    assert(op != NULL);

    dvmLockMutex(&gDvm.heapWorkerListLock);

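    /* Drain pending reference enqueue/clear operations before pending
     * finalizations.
     */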
    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
    if (obj != NULL) {
        *op = WORKER_ENQUEUE;
    } else {
        obj = dvmHeapGetNextObjectFromLargeTable(
                &gcHeap->pendingFinalizationRefs);
        if (obj != NULL) {
            *op = WORKER_FINALIZE;
        }
    }

    if (obj != NULL) {
        /* Don't let the GC collect the object until the
         * worker thread is done with it.
         */
        dvmAddTrackedAlloc(obj, NULL);
    }

    dvmUnlockMutex(&gDvm.heapWorkerListLock);

    return obj;
}

/* Whenever the effective heap size may have changed,
 * this function must be called.
 */
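/* (The body is currently empty; the call sites remain in place as a
 * notification hook.)
 */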
void dvmHeapSizeChanged()
{
}

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    /* This may adjust the soft limit as a side-effect.
     */
    LOGD_HEAP("dvmMalloc initiating GC%s\n",
            collectSoftReferences ? "(collect SoftReferences)" : "");
    dvmCollectGarbageInternal(collectSoftReferences, GC_FOR_MALLOC);
}

/* Try as hard as possible to allocate some memory.
 */
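/* The fallback ladder below, in order: plain allocation; wait for a
 * running concurrent GC and retry; foreground GC that keeps
 * SoftReferences; allocation that may grow the heap; GC that clears
 * SoftReferences; one final growing allocation.  Only after all of
 * that do we give up and return NULL.
 */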
static void *tryMalloc(size_t size)
{
    void *ptr;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapSizeMax) {
        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
                "someone's allocating a huge buffer\n", size, size);
        ptr = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }
    /*
     * Another failure.  Our thread was starved or there may be too
     * many live objects.  Try a foreground GC.  This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    dvmHeapSizeChanged();
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation\n",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    dvmHeapSizeChanged();
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.\n", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress.  If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects.  If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    void *ptr;

#if defined(WITH_ALLOC_LIMITS)
    /*
     * See if they've exceeded the allocation limit for this thread.
     *
     * A limit value of -1 means "no limit".
     *
     * This is enabled at compile time because it requires us to do a
     * TLS lookup for the Thread pointer.  This has enough of a performance
     * impact that we don't want to do it if we don't have to.  (Now that
     * we're using gDvm.checkAllocLimits we may want to reconsider this,
     * but it's probably still best to just compile the check out of
     * production code -- one less thing to hit on every allocation.)
     */
    if (gDvm.checkAllocLimits) {
        Thread* self = dvmThreadSelf();
        if (self != NULL) {
            int count = self->allocLimit;
            if (count > 0) {
                self->allocLimit--;
            } else if (count == 0) {
                /* fail! */
                assert(!gDvm.initializing);
                self->allocLimit = -1;
                dvmThrowException("Ldalvik/system/AllocationLimitError;",
                    "thread allocation limit exceeded");
                return NULL;
            }
        }
    }

    if (gDvm.allocationLimit >= 0) {
        assert(!gDvm.initializing);
        gDvm.allocationLimit = -1;
        dvmThrowException("Ldalvik/system/AllocationLimitError;",
            "global allocation limit exceeded");
        return NULL;
    }
#endif

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if ((flags & ALLOC_FINALIZABLE) != 0) {
            /* This object is an instance of a class that
             * overrides finalize().  Add it to the finalizable list.
             */
            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
                                    (Object *)ptr))
            {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "finalizable objects\n");
                dvmAbort();
            }
        }

#ifdef WITH_PROFILER
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
#endif
    } else {
        /* The allocation failed.
         */

#ifdef WITH_PROFILER
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
#endif
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc(ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}

/*
 * Returns true iff <obj> points to a word-aligned address within Heap
 * address space.
 */
bool dvmIsValidObjectAddress(const void* ptr)
{
    /* Don't bother if it's not 4-byte aligned.
     */
    if (((uintptr_t)ptr & (4-1)) == 0) {
        return dvmHeapSourceContainsAddress(ptr);
    }
    return false;
}

size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

/*
 * Scan every live object in the heap, holding the locks.
 */
static void verifyHeap()
{
    // TODO: check the locks.
    HeapBitmap *liveBits = dvmHeapSourceGetLiveBits();
    dvmVerifyBitmap(liveBits);
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
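/* Overview of the phases below: suspend all threads; mark the root set;
 * (for GC_CONCURRENT, resume threads during the recursive mark, then
 * re-suspend and re-mark roots and dirtied objects); process soft, weak,
 * and phantom references; schedule finalizations; sweep unmarked
 * objects; adjust the heap footprint; resume threads.
 */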
void dvmCollectGarbageInternal(bool clearSoftRefs, GcReason reason)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u8 now;
    s8 timeSinceLastGc;
    s8 gcElapsedTime;
    int numFreed;
    size_t sizeFreed;
    GcMode gcMode;
    int oldThreadPriority = kInvalidPriority;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC\n");
        return;
    }
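    /* Allocation-triggered collections scan only the most recently
     * created heap (GC_PARTIAL); every other reason forces a full
     * collection of all heaps.
     */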
    gcMode = (reason == GC_FOR_MALLOC) ? GC_PARTIAL : GC_FULL;
    gcHeap->gcRunning = true;
    now = dvmGetRelativeTimeUsec();
    if (gcHeap->gcStartTime != 0) {
        timeSinceLastGc = (now - gcHeap->gcStartTime) / 1000;
    } else {
        timeSinceLastGc = 0;
    }
    gcHeap->gcStartTime = now;

    LOGV_HEAP("%s starting -- suspending threads\n", GcReasonStr[reason]);

    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (reason != GC_CONCURRENT) {
        /* Get the priority (the "nice" value) of the current thread.  The
         * getpriority() call can legitimately return -1, so we have to
         * explicitly test errno.
         */
        errno = 0;
        int priorityResult = getpriority(PRIO_PROCESS, 0);
        if (errno != 0) {
            LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
        } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
            /* Current value is numerically greater than "normal", which
             * in backward UNIX terms means lower priority.
             */

            if (priorityResult >= ANDROID_PRIORITY_BACKGROUND) {
                set_sched_policy(dvmGetSysThreadId(), SP_FOREGROUND);
            }

            if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
                LOGI_HEAP("Unable to elevate priority from %d to %d\n",
                          priorityResult, ANDROID_PRIORITY_NORMAL);
            } else {
                /* priority elevated; save value so we can restore it later */
                LOGD_HEAP("Elevating priority from %d to %d\n",
                          priorityResult, ANDROID_PRIORITY_NORMAL);
                oldThreadPriority = priorityResult;
            }
        }
    }

    /* Wait for the HeapWorker thread to block.
     * (It may also already be suspended in interp code,
     * in which case it's not holding heapWorkerLock.)
     */
    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Make sure that the HeapWorker thread hasn't become
     * wedged inside interp code.  If it has, this call will
     * print a message and abort the VM.
     */
    dvmAssertHeapWorkerThreadRunning();

    /* Lock the pendingFinalizationRefs list.
     *
     * Acquire the lock after suspending so the finalizer
     * thread can't block in the RUNNING state while
     * we try to suspend.
     */
    dvmLockMutex(&gDvm.heapWorkerListLock);

    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying heap before GC");
        verifyHeap();
    }

#ifdef WITH_PROFILER
    dvmMethodTraceGCBegin();
#endif

#if WITH_HPROF

/* Set DUMP_HEAP_ON_DDMS_UPDATE to 1 to enable heap dumps
 * whenever DDMS requests a heap update (HPIF chunk).
 * The output files will appear in /data/misc, which must
 * already exist.
 * You must define "WITH_HPROF := true" in your buildspec.mk
 * and recompile libdvm for this to work.
 *
 * To enable stack traces for each allocation, define
 * "WITH_HPROF_STACK := true" in buildspec.mk.  This option slows down
 * allocations and also requires 8 additional bytes per object on the
 * GC heap.
 */
#define DUMP_HEAP_ON_DDMS_UPDATE 0
#if DUMP_HEAP_ON_DDMS_UPDATE
    gcHeap->hprofDumpOnGc |= (gcHeap->ddmHpifWhen != 0);
#endif

    if (gcHeap->hprofDumpOnGc) {
        char nameBuf[128];

        gcHeap->hprofResult = -1;

        if (gcHeap->hprofFileName == NULL) {
            /* no filename was provided; invent one */
            sprintf(nameBuf, "/data/misc/heap-dump-tm%d-pid%d.hprof",
                (int) time(NULL), (int) getpid());
            gcHeap->hprofFileName = nameBuf;
        }
        gcHeap->hprofContext = hprofStartup(gcHeap->hprofFileName,
                gcHeap->hprofDirectToDdms);
        if (gcHeap->hprofContext != NULL) {
            hprofStartHeapDump(gcHeap->hprofContext);
        }
        gcHeap->hprofDumpOnGc = false;
        gcHeap->hprofFileName = NULL;
    }
#endif

    if (timeSinceLastGc < 10000) {
        LOGD_HEAP("GC! (%dms since last GC)\n",
                (int)timeSinceLastGc);
    } else {
        LOGD_HEAP("GC! (%d sec since last GC)\n",
                (int)(timeSinceLastGc / 1000));
    }

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(gcMode)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting\n");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    gcHeap->softReferences = NULL;
    gcHeap->weakReferences = NULL;
    gcHeap->phantomReferences = NULL;

    if (reason == GC_CONCURRENT) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (reason == GC_CONCURRENT) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapMarkRootSet();
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmMarkDirtyObjects();
    }

    /* All strongly-reachable objects have now been marked.
     */
    LOGD_HEAP("Handling soft references...");
    if (!clearSoftRefs) {
        dvmHandleSoftRefs(&gcHeap->softReferences);
    }
    dvmClearWhiteRefs(&gcHeap->softReferences);

    LOGD_HEAP("Handling weak references...");
    dvmClearWhiteRefs(&gcHeap->weakReferences);

    /* Once all weak-reachable objects have been taken
     * care of, any remaining unmarked objects can be finalized.
     */
    LOGD_HEAP("Finding finalizations...");
    dvmHeapScheduleFinalizations();

    LOGD_HEAP("Handling f-reachable soft references...");
    dvmClearWhiteRefs(&gcHeap->softReferences);

    LOGD_HEAP("Handling f-reachable weak references...");
    dvmClearWhiteRefs(&gcHeap->weakReferences);

    /* Any remaining objects that are not pending finalization
     * could be phantom-reachable.  This will mark any phantom-reachable
     * objects, as well as enqueue their references.
     */
    LOGD_HEAP("Handling phantom references...");
    dvmClearWhiteRefs(&gcHeap->phantomReferences);

#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("before sweep");
#endif
    LOGD_HEAP("Sweeping...");
    dvmHeapSweepUnmarkedObjects(gcMode, &numFreed, &sizeFreed);
#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("after sweep");
#endif

    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();
    dvmHeapSizeChanged();

#if WITH_HPROF
    if (gcHeap->hprofContext != NULL) {
        hprofFinishHeapDump(gcHeap->hprofContext);
//TODO: write a HEAP_SUMMARY record
        if (hprofShutdown(gcHeap->hprofContext))
            gcHeap->hprofResult = 0;    /* indicate success */
        gcHeap->hprofContext = NULL;
    }
#endif

    /* Now that we've freed up the GC heap, return any large
     * free chunks back to the system.  They'll get paged back
     * in the next time they're used.  Don't do it immediately,
     * though;  if the process is still allocating a bunch of
     * memory, we'll be taking a ton of page faults that we don't
     * necessarily need to.
     *
     * Cancel any old scheduled trims, and schedule a new one.
     */
    dvmScheduleHeapSourceTrim(5);  // in seconds

#ifdef WITH_PROFILER
    dvmMethodTraceGCEnd();
#endif
    LOGV_HEAP("GC finished");

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying heap after GC");
        verifyHeap();
    }

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");
    dvmUnlockMutex(&gDvm.heapWorkerListLock);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    dvmResumeAllThreads(SUSPEND_FOR_GC);

    if (reason == GC_CONCURRENT) {
        /*
         * Wake-up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (reason != GC_CONCURRENT) {
        if (oldThreadPriority != kInvalidPriority) {
            if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
                LOGW_HEAP("Unable to reset priority to %d: %s\n",
                          oldThreadPriority, strerror(errno));
            } else {
                LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
            }

            if (oldThreadPriority >= ANDROID_PRIORITY_BACKGROUND) {
                set_sched_policy(dvmGetSysThreadId(), SP_BACKGROUND);
            }
        }
    }
    gcElapsedTime = (dvmGetRelativeTimeUsec() - gcHeap->gcStartTime) / 1000;
    LOGD("%s freed %d objects / %zd bytes in %dms\n",
         GcReasonStr[reason], numFreed, sizeFreed, (int)gcElapsedTime);
    dvmLogGcStats(numFreed, sizeFreed, gcElapsedTime);

    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM\n");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM\n");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM\n");
        dvmDdmSendHeapSegments(false, true);
    }
}

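/* Block until the concurrent GC broadcasts gcHeapCond.  The caller must
 * hold gcHeapLock; the condition wait releases it while blocked and
 * re-acquires it before returning.
 */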
void dvmWaitForConcurrentGcToComplete(void)
{
    Thread *self = dvmThreadSelf();
    ThreadStatus oldStatus;
    assert(self != NULL);
    oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
    dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
    dvmChangeStatus(self, oldStatus);
}

#if WITH_HPROF
/*
 * Perform garbage collection, writing heap information to the specified file.
 *
 * If "fileName" is NULL, a suitable name will be generated automatically.
 *
 * Returns 0 on success, or an error code on failure.
 */
int hprofDumpHeap(const char* fileName, bool directToDdms)
{
    int result;

    dvmLockMutex(&gDvm.gcHeapLock);

    gDvm.gcHeap->hprofDumpOnGc = true;
    gDvm.gcHeap->hprofFileName = fileName;
    gDvm.gcHeap->hprofDirectToDdms = directToDdms;
    dvmCollectGarbageInternal(false, GC_HPROF_DUMP_HEAP);
    result = gDvm.gcHeap->hprofResult;

    dvmUnlockMutex(&gDvm.gcHeapLock);

    return result;
}

void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber)
{
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofSetGcScanState(gDvm.gcHeap->hprofContext, state,
                threadSerialNumber);
    }
}
#endif