/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapTable.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"

#include "utils/threads.h"      // need Android thread priorities
#define kInvalidPriority        10000

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

#define kNonCollectableRefDefault   16
#define kFinalizableRefDefault      128

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

#if defined(WITH_ALLOC_LIMITS)
    gDvm.checkAllocLimits = false;
    gDvm.allocationLimit = -1;
#endif

    gcHeap = dvmHeapSourceStartup(gDvm.heapSizeStart, gDvm.heapSizeMax);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->heapWorkerCurrentObject = NULL;
    gcHeap->heapWorkerCurrentMethod = NULL;
    gcHeap->heapWorkerInterpStartTime = 0LL;
    gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    gcHeap->softReferenceHeapSizeThreshold = gDvm.heapSizeStart;
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
#if WITH_HPROF
    gcHeap->hprofDumpOnGc = false;
    gcHeap->hprofContext = NULL;
#endif

    /* This needs to be set before we call dvmHeapInitHeapRefTable().
     */
    gDvm.gcHeap = gcHeap;

    /* Set up the table we'll use for ALLOC_NO_GC.
     */
    if (!dvmHeapInitHeapRefTable(&gcHeap->nonCollectableRefs,
                           kNonCollectableRefDefault))
    {
        LOGE_HEAP("Can't allocate ALLOC_NO_GC table\n");
        goto fail;
    }

    /* Set up the lists and lock we'll use for finalizable
     * and reference objects.
     */
    dvmInitMutex(&gDvm.heapWorkerListLock);
    gcHeap->finalizableRefs = NULL;
    gcHeap->pendingFinalizationRefs = NULL;
    gcHeap->referenceOperations = NULL;

    /* Initialize the HeapWorker locks and other state
     * that the GC uses.
     */
    dvmInitializeHeapWorkerState();

    return true;

fail:
    gDvm.gcHeap = NULL;
    dvmHeapSourceShutdown(gcHeap);
    return false;
}

bool dvmHeapStartupAfterZygote()
{
    /* Update our idea of the last GC start time so that we
     * don't use the last time that Zygote happened to GC.
     */
    gDvm.gcHeap->gcStartTime = dvmGetRelativeTimeUsec();

    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        GcHeap *gcHeap;

        gcHeap = gDvm.gcHeap;
        gDvm.gcHeap = NULL;

        /* Tables are allocated on the native heap;
         * they need to be cleaned up explicitly.
         * The process may stick around, so we don't
         * want to leak any native memory.
         */
        dvmHeapFreeHeapRefTable(&gcHeap->nonCollectableRefs);

        dvmHeapFreeLargeTable(gcHeap->finalizableRefs);
        gcHeap->finalizableRefs = NULL;

        dvmHeapFreeLargeTable(gcHeap->pendingFinalizationRefs);
        gcHeap->pendingFinalizationRefs = NULL;

        dvmHeapFreeLargeTable(gcHeap->referenceOperations);
        gcHeap->referenceOperations = NULL;

        /* Destroy the heap.  Any outstanding pointers
         * will point to unmapped memory (unless/until
         * someone else maps it).  This frees gcHeap
         * as a side-effect.
         */
        dvmHeapSourceShutdown(gcHeap);
    }
}

/*
 * We've been asked to allocate something we can't, e.g. an array so
 * large that (length * elementWidth) is larger than 2^31.  We want to
 * throw an OutOfMemoryError, but doing so implies that certain other
 * actions have taken place (like clearing soft references).
 *
 * TODO: for now we just throw an InternalError.
 */
void dvmThrowBadAllocException(const char* msg)
{
    dvmThrowException("Ljava/lang/InternalError;", msg);
}
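
/* Illustrative example (not from the original source): a request like
 * "new long[0x20000000]" implies 0x20000000 * 8 == 2^32 bytes of array
 * storage, which can't be represented as a non-negative 32-bit size.
 * A caller that detects the overflow might report it like this (the
 * guard itself is a hypothetical sketch):
 *
 *     if (length > 0x7fffffff / elementWidth) {
 *         dvmThrowBadAllocException("array too large");
 *         return NULL;
 *     }
 */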

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (pthread_mutex_trylock(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;
        int cc;

        self = dvmThreadSelf();
        if (self != NULL) {
            oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        } else {
            oldStatus = -1; // shut up gcc
        }

        cc = pthread_mutex_lock(&gDvm.gcHeapLock);
        assert(cc == 0);

        if (self != NULL) {
            dvmChangeStatus(self, oldStatus);
        }
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}
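
/* Illustrative usage sketch (not from the original source): internal
 * callers bracket heap mutations with the lock.  dvmLockHeap() always
 * returns true; it simply blocks in THREAD_VMWAIT until the mutex is
 * available.
 *
 *     dvmLockHeap();
 *     ... allocate, sweep, or inspect gDvm.gcHeap state ...
 *     dvmUnlockHeap();
 */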

/* Pop an object from the list of pending finalizations and
 * reference clears/enqueues, and return the object.
 * The caller must call dvmReleaseTrackedAlloc()
 * on the object when finished.
 *
 * Typically only called by the heap worker thread.
 */
Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
{
    Object *obj;
    GcHeap *gcHeap = gDvm.gcHeap;

    assert(op != NULL);

    obj = NULL;

    dvmLockMutex(&gDvm.heapWorkerListLock);

    /* We must handle reference operations before finalizations.
     * If:
     *     a) Someone subclasses WeakReference and overrides clear()
     *     b) A reference of this type is the last reference to
     *        a finalizable object
     * then we need to guarantee that the overridden clear() is called
     * on the reference before finalize() is called on the referent.
     * Both of these operations will always be scheduled at the same
     * time, so handling reference operations first will guarantee
     * the required order.
     */
    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
    if (obj != NULL) {
        uintptr_t workBits;

        workBits = (uintptr_t)obj & (WORKER_CLEAR | WORKER_ENQUEUE);
        assert(workBits != 0);
        obj = (Object *)((uintptr_t)obj & ~(WORKER_CLEAR | WORKER_ENQUEUE));

        *op = workBits;
    } else {
        obj = dvmHeapGetNextObjectFromLargeTable(
                &gcHeap->pendingFinalizationRefs);
        if (obj != NULL) {
            *op = WORKER_FINALIZE;
        }
    }

    if (obj != NULL) {
        /* Don't let the GC collect the object until the
         * worker thread is done with it.
         *
         * This call is safe; it uses thread-local storage
         * and doesn't acquire any locks.
         */
        dvmAddTrackedAlloc(obj, NULL);
    }

    dvmUnlockMutex(&gDvm.heapWorkerListLock);

    return obj;
}
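
/* Illustrative consumer-loop sketch (the real HeapWorker thread lives
 * elsewhere; this only shows the contract described above, including
 * the required dvmReleaseTrackedAlloc() call):
 *
 *     HeapWorkerOperation op;
 *     Object *obj;
 *
 *     while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
 *         // dispatch on op: WORKER_CLEAR and/or WORKER_ENQUEUE for
 *         // reference objects, WORKER_FINALIZE for finalizable objects
 *         dvmReleaseTrackedAlloc(obj, NULL);
 *     }
 */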

/* Used for a heap size change hysteresis to avoid collecting
 * SoftReferences when the heap only grows by a small amount.
 */
#define SOFT_REFERENCE_GROWTH_SLACK (128 * 1024)

/* This function must be called whenever the effective heap size
 * may have changed.
 */
void dvmHeapSizeChanged()
{
    GcHeap *gcHeap = gDvm.gcHeap;
    size_t currentHeapSize;

    currentHeapSize = dvmHeapSourceGetIdealFootprint();

    /* See if the heap size has changed enough that we should care
     * about it.
     */
    if (currentHeapSize <= gcHeap->softReferenceHeapSizeThreshold -
            4 * SOFT_REFERENCE_GROWTH_SLACK)
    {
        /* The heap has shrunk enough that we'll use this as a new
         * threshold.  Since we're doing better on space, there's
         * no need to collect any SoftReferences.
         *
         * This is 4x the growth hysteresis because we don't want
         * to snap down so easily after a shrink.  If we just cleared
         * up a bunch of SoftReferences, we don't want to disallow
         * any new ones from being created.
         * TODO: determine if the 4x is important, needed, or even good
         */
        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    } else if (currentHeapSize >= gcHeap->softReferenceHeapSizeThreshold +
            SOFT_REFERENCE_GROWTH_SLACK)
    {
        /* The heap has grown enough to warrant collecting SoftReferences.
         */
        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
        gcHeap->softReferenceCollectionState = SR_COLLECT_SOME;
    }
}
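
/* Worked example of the hysteresis: with a threshold of 8192KB, the heap
 * must grow to at least 8192KB + 128KB before we switch to
 * SR_COLLECT_SOME, and must shrink to at most 8192KB - 512KB
 * (4 * SOFT_REFERENCE_GROWTH_SLACK) before we lower the threshold and
 * stop collecting.  Sizes in between leave the state unchanged.
 */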


/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    /* This may adjust the soft limit as a side-effect.
     */
    LOGD_HEAP("dvmMalloc initiating GC%s\n",
            collectSoftReferences ? " (collect SoftReferences)" : "");
    dvmCollectGarbageInternal(collectSoftReferences);
}

/* Try as hard as possible to allocate some memory.
 */
static DvmHeapChunk *tryMalloc(size_t size)
{
    DvmHeapChunk *hc;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapSizeMax) {
        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
                "someone's allocating a huge buffer\n", size, size);
        hc = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
    if (hc != NULL) {
        return hc;
    }

    /* The allocation failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap
     * if the live set is sufficiently large.
     */
    gcForMalloc(false);
    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
    if (hc != NULL) {
        return hc;
    }

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
    dvmHeapSizeChanged();
    if (hc != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation\n",
                FRACTIONAL_MB(newHeapSize), size);
        return hc;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
            size);
    gcForMalloc(true);
    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
    dvmHeapSizeChanged();
    if (hc != NULL) {
        return hc;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zu-byte allocation.\n", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress: if a non-VM thread attaches itself through JNI
 * (AttachCurrentThread), it will need to allocate some objects.  If
 * this becomes annoying to deal with, we can block it at the source,
 * but holding the allocation mutex should be enough.
 *
 * We implement ALLOC_NO_GC by maintaining an internal list of objects
 * that should not be collected.  This requires no actual flag storage in
 * the object itself, which is good, but makes flag queries expensive.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    DvmHeapChunk *hc;
    void *ptr;

#if 0
    /* handy for spotting large allocations */
    if (size >= 100000) {
        LOGI("dvmMalloc(%zu):\n", size);
        dvmDumpThread(dvmThreadSelf(), false);
    }
#endif

#if defined(WITH_ALLOC_LIMITS)
    /*
     * See if they've exceeded the allocation limit for this thread.
     *
     * A limit value of -1 means "no limit".
     *
     * This is enabled at compile time because it requires us to do a
     * TLS lookup for the Thread pointer.  This has enough of a performance
     * impact that we don't want to do it if we don't have to.  (Now that
     * we're using gDvm.checkAllocLimits we may want to reconsider this,
     * but it's probably still best to just compile the check out of
     * production code -- one less thing to hit on every allocation.)
     */
    if (gDvm.checkAllocLimits) {
        Thread* self = dvmThreadSelf();
        if (self != NULL) {
            int count = self->allocLimit;
            if (count > 0) {
                self->allocLimit--;
            } else if (count == 0) {
                /* fail! */
                assert(!gDvm.initializing);
                self->allocLimit = -1;
                dvmThrowException("Ldalvik/system/AllocationLimitError;",
                    "thread allocation limit exceeded");
                return NULL;
            }
        }
    }

    if (gDvm.allocationLimit >= 0) {
        assert(!gDvm.initializing);
        gDvm.allocationLimit = -1;
        dvmThrowException("Ldalvik/system/AllocationLimitError;",
            "global allocation limit exceeded");
        return NULL;
    }
#endif

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    hc = tryMalloc(size);
    if (hc != NULL) {
        /* We've got the memory.
         */
        if ((flags & ALLOC_FINALIZABLE) != 0) {
            /* This object is an instance of a class that
             * overrides finalize().  Add it to the finalizable list.
             *
             * Note that until DVM_OBJECT_INIT() is called on this
             * object, its clazz will be NULL.  Since the object is
             * in this table, it will be scanned as part of the root
             * set.  scanObject() explicitly deals with the NULL clazz.
             */
            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
                                    (Object *)hc->data))
            {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "finalizable objects\n");
                dvmAbort();
            }
        }

#if WITH_OBJECT_HEADERS
        hc->header = OBJECT_HEADER;
        hc->birthGeneration = gGeneration;
#endif
        ptr = hc->data;

        /* The caller may not want us to collect this object.
         * If not, throw it in the nonCollectableRefs table, which
         * will be added to the root set when we GC.
         *
         * Note that until DVM_OBJECT_INIT() is called on this
         * object, its clazz will be NULL.  Since the object is
         * in this table, it will be scanned as part of the root
         * set.  scanObject() explicitly deals with the NULL clazz.
         */
        if ((flags & ALLOC_NO_GC) != 0) {
            if (!dvmHeapAddToHeapRefTable(&gcHeap->nonCollectableRefs, ptr)) {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "ALLOC_NO_GC objects: %zu\n",
                        dvmHeapNumHeapRefTableEntries(
                                &gcHeap->nonCollectableRefs));
                dvmAbort();
            }
        }

#ifdef WITH_PROFILER
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
#endif
    } else {
        /* The allocation failed.
         */
        ptr = NULL;

#ifdef WITH_PROFILER
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
#endif
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If this block is immediately GCable, and they haven't asked us not
         * to track it, add it to the internal tracking list.
         *
         * If there's no "self" yet, we can't track it.  Calls made before
         * the Thread exists should use ALLOC_NO_GC.
         */
        if ((flags & (ALLOC_DONT_TRACK | ALLOC_NO_GC)) == 0) {
            dvmAddTrackedAlloc(ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}
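
/* Illustrative usage sketch (assumes the usual object-creation sequence;
 * DVM_OBJECT_INIT() is what fills in the clazz pointer mentioned above):
 *
 *     Object *newObj = (Object *)dvmMalloc(totalSize, ALLOC_DEFAULT);
 *     if (newObj != NULL) {
 *         DVM_OBJECT_INIT(newObj, clazz);
 *         ... make newObj reachable from the root set ...
 *         dvmReleaseTrackedAlloc(newObj, NULL);  // balance the tracking
 *     }
 */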

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    const DvmHeapChunk *hc;

    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    hc = ptr2chunk(obj);
    if (obj != NULL && ((uintptr_t)hc & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(hc);
    }
    return false;
}
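
/* Illustrative use: a cheap debug-build sanity check on object pointers
 * handed in from native code:
 *
 *     assert(dvmIsValidObject(obj));
 */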

/*
 * Clear flags that were passed into dvmMalloc() et al.
 * e.g., ALLOC_NO_GC, ALLOC_DONT_TRACK.
 */
void dvmClearAllocFlags(Object *obj, int mask)
{
    if ((mask & ALLOC_NO_GC) != 0) {
        dvmLockHeap();
        if (dvmIsValidObject(obj)) {
            if (!dvmHeapRemoveFromHeapRefTable(&gDvm.gcHeap->nonCollectableRefs,
                                               obj))
            {
                LOGE_HEAP("dvmClearAllocFlags(): failed to remove ALLOC_NO_GC "
                        "bit from object 0x%08x\n", (uintptr_t)obj);
                dvmAbort();
            }
//TODO: shrink if the table is very empty
        }
        dvmUnlockHeap();
    }

    if ((mask & ALLOC_DONT_TRACK) != 0) {
        dvmReleaseTrackedAlloc(obj, NULL);
    }
}

size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(ptr2chunk(obj)) - sizeof(DvmHeapChunk);
}
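
/* Note on the layout this relies on: every object is preceded by a
 * DvmHeapChunk header, so a chunk of C bytes holds an object of
 * C - sizeof(DvmHeapChunk) bytes.  This mirrors the
 * "size + sizeof(DvmHeapChunk)" requests made in tryMalloc() above.
 */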

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(bool collectSoftReferences)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    Object *softReferences;
    Object *weakReferences;
    Object *phantomReferences;

    u8 now;
    s8 timeSinceLastGc;
    s8 gcElapsedTime;
    int numFreed;
    size_t sizeFreed;

#if DVM_TRACK_HEAP_MARKING
    /* Since weak and soft references are always cleared,
     * they don't require any marking.
     * (Soft references are lumped in with strong references
     * when they aren't cleared.)
     */
    size_t strongMarkCount = 0;
    size_t strongMarkSize = 0;
    size_t finalizeMarkCount = 0;
    size_t finalizeMarkSize = 0;
    size_t phantomMarkCount = 0;
    size_t phantomMarkSize = 0;
#endif

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC\n");
        return;
    }
    gcHeap->gcRunning = true;
    now = dvmGetRelativeTimeUsec();
    if (gcHeap->gcStartTime != 0) {
        timeSinceLastGc = (now - gcHeap->gcStartTime) / 1000;
    } else {
        timeSinceLastGc = 0;
    }
    gcHeap->gcStartTime = now;

    LOGV_HEAP("GC starting -- suspending threads\n");

    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /* Get the priority (the "nice" value) of the current thread.  The
     * getpriority() call can legitimately return -1, so we have to
     * explicitly test errno.
     */
    errno = 0;
    int oldThreadPriority = kInvalidPriority;
    int priorityResult = getpriority(PRIO_PROCESS, 0);
    if (errno != 0) {
        LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
    } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
        /* Current value is numerically greater than "normal", which
         * in backward UNIX terms means lower priority.
         */

        if (priorityResult >= ANDROID_PRIORITY_BACKGROUND) {
            dvmChangeThreadSchedulerGroup(NULL);
        }

        if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
            LOGI_HEAP("Unable to elevate priority from %d to %d\n",
                priorityResult, ANDROID_PRIORITY_NORMAL);
        } else {
            /* priority elevated; save value so we can restore it later */
            LOGD_HEAP("Elevating priority from %d to %d\n",
                priorityResult, ANDROID_PRIORITY_NORMAL);
            oldThreadPriority = priorityResult;
        }
    }

    /* Wait for the HeapWorker thread to block.
     * (It may also already be suspended in interp code,
     * in which case it's not holding heapWorkerLock.)
     */
    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Make sure that the HeapWorker thread hasn't become
     * wedged inside interp code.  If it has, this call will
     * print a message and abort the VM.
     */
    dvmAssertHeapWorkerThreadRunning();

    /* Lock the pendingFinalizationRefs list.
     *
     * Acquire the lock after suspending so the finalizer
     * thread can't block in the RUNNING state while
     * we try to suspend.
     */
    dvmLockMutex(&gDvm.heapWorkerListLock);

#ifdef WITH_PROFILER
    dvmMethodTraceGCBegin();
#endif

#if WITH_HPROF

/* Set DUMP_HEAP_ON_DDMS_UPDATE to 1 to enable heap dumps
 * whenever DDMS requests a heap update (HPIF chunk).
 * The output files will appear in /data/misc, which must
 * already exist.
 * You must define "WITH_HPROF := true" in your buildspec.mk
 * and recompile libdvm for this to work.
 *
 * To enable stack traces for each allocation, define
 * "WITH_HPROF_STACK := true" in buildspec.mk.  This option slows down
 * allocations and also requires 8 additional bytes per object on the
 * GC heap.
 */
#define DUMP_HEAP_ON_DDMS_UPDATE 0
#if DUMP_HEAP_ON_DDMS_UPDATE
    gcHeap->hprofDumpOnGc |= (gcHeap->ddmHpifWhen != 0);
#endif

    if (gcHeap->hprofDumpOnGc) {
        char nameBuf[128];

        gcHeap->hprofResult = -1;

        if (gcHeap->hprofFileName == NULL) {
            /* no filename was provided; invent one */
            sprintf(nameBuf, "/data/misc/heap-dump-tm%d-pid%d.hprof",
                (int) time(NULL), (int) getpid());
            gcHeap->hprofFileName = nameBuf;
        }
        gcHeap->hprofContext = hprofStartup(gcHeap->hprofFileName);
        if (gcHeap->hprofContext != NULL) {
            hprofStartHeapDump(gcHeap->hprofContext);
        }
        gcHeap->hprofDumpOnGc = false;
        gcHeap->hprofFileName = NULL;
    }
#endif

    if (timeSinceLastGc < 10000) {
        LOGD_HEAP("GC! (%dms since last GC)\n",
                (int)timeSinceLastGc);
    } else {
        LOGD_HEAP("GC! (%d sec since last GC)\n",
                (int)(timeSinceLastGc / 1000));
    }
#if DVM_TRACK_HEAP_MARKING
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Set up the marking context.
     */
    dvmHeapBeginMarkStep();

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    gcHeap->softReferences = NULL;
    gcHeap->weakReferences = NULL;
    gcHeap->phantomReferences = NULL;

    /* Make sure that we don't hard-mark the referents of Reference
     * objects by default.
     */
    gcHeap->markAllReferents = false;

    /* Don't mark SoftReferences if our caller wants us to collect them.
     * This has to be set before calling dvmHeapScanMarkedObjects().
     */
    if (collectSoftReferences) {
        gcHeap->softReferenceCollectionState = SR_COLLECT_ALL;
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();
#if DVM_TRACK_HEAP_MARKING
    strongMarkCount = gcHeap->markCount;
    strongMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Latch these so that the other calls to dvmHeapScanMarkedObjects() don't
     * mess with them.
     */
    softReferences = gcHeap->softReferences;
    weakReferences = gcHeap->weakReferences;
    phantomReferences = gcHeap->phantomReferences;

    /* All strongly-reachable objects have now been marked.
     */
    if (gcHeap->softReferenceCollectionState != SR_COLLECT_NONE) {
        LOGD_HEAP("Handling soft references...");
        dvmHeapHandleReferences(softReferences, REF_SOFT);
        // markCount always zero

        /* Now that we've tried collecting SoftReferences,
         * fall back to not collecting them.  If the heap
         * grows, we will start collecting again.
         */
        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    } // else dvmHeapScanMarkedObjects() already marked the soft-reachable set
    LOGD_HEAP("Handling weak references...");
    dvmHeapHandleReferences(weakReferences, REF_WEAK);
    // markCount always zero

    /* Once all weak-reachable objects have been taken
     * care of, any remaining unmarked objects can be finalized.
     */
    LOGD_HEAP("Finding finalizations...");
    dvmHeapScheduleFinalizations();
#if DVM_TRACK_HEAP_MARKING
    finalizeMarkCount = gcHeap->markCount;
    finalizeMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Any remaining objects that are not pending finalization
     * could be phantom-reachable.  This will mark any phantom-reachable
     * objects, as well as enqueue their references.
     */
    LOGD_HEAP("Handling phantom references...");
    dvmHeapHandleReferences(phantomReferences, REF_PHANTOM);
#if DVM_TRACK_HEAP_MARKING
    phantomMarkCount = gcHeap->markCount;
    phantomMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

//TODO: take care of JNI weak global references

#if DVM_TRACK_HEAP_MARKING
    LOGI_HEAP("Marked objects: %zuB strong, %zuB final, %zuB phantom\n",
            strongMarkSize, finalizeMarkSize, phantomMarkSize);
#endif

#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("before sweep");
#endif
    LOGD_HEAP("Sweeping...");
    dvmHeapSweepUnmarkedObjects(&numFreed, &sizeFreed);
#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("after sweep");
#endif

    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();
    dvmHeapSizeChanged();

#if WITH_HPROF
    if (gcHeap->hprofContext != NULL) {
        hprofFinishHeapDump(gcHeap->hprofContext);
//TODO: write a HEAP_SUMMARY record
        if (hprofShutdown(gcHeap->hprofContext))
            gcHeap->hprofResult = 0;    /* indicate success */
        gcHeap->hprofContext = NULL;
    }
#endif

    /* Now that we've freed up the GC heap, return any large
     * free chunks back to the system.  They'll get paged back
     * in the next time they're used.  Don't do it immediately,
     * though; if the process is still allocating a bunch of
     * memory, we'll be taking a ton of page faults that we don't
     * necessarily need to.
     *
     * Cancel any old scheduled trims, and schedule a new one.
     */
    dvmScheduleHeapSourceTrim(5);  // in seconds

#ifdef WITH_PROFILER
    dvmMethodTraceGCEnd();
#endif
    LOGV_HEAP("GC finished -- resuming threads\n");

    gcHeap->gcRunning = false;

    dvmUnlockMutex(&gDvm.heapWorkerListLock);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    dvmResumeAllThreads(SUSPEND_FOR_GC);
    if (oldThreadPriority != kInvalidPriority) {
        if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
            LOGW_HEAP("Unable to reset priority to %d: %s\n",
                oldThreadPriority, strerror(errno));
        } else {
            LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
        }

        if (oldThreadPriority >= ANDROID_PRIORITY_BACKGROUND) {
            dvmChangeThreadSchedulerGroup("bg_non_interactive");
        }
    }
    gcElapsedTime = (dvmGetRelativeTimeUsec() - gcHeap->gcStartTime) / 1000;
    if (gcElapsedTime < 10000) {
        LOGD("GC freed %d objects / %zu bytes in %dms\n",
                numFreed, sizeFreed, (int)gcElapsedTime);
    } else {
        LOGD("GC freed %d objects / %zu bytes in %d sec\n",
                numFreed, sizeFreed, (int)(gcElapsedTime / 1000));
    }
    dvmLogGcStats(numFreed, sizeFreed, gcElapsedTime);

    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM\n");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM\n");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM\n");
        dvmDdmSendHeapSegments(false, true);
    }
}

#if WITH_HPROF
/*
 * Perform garbage collection, writing heap information to the specified file.
 *
 * If "fileName" is NULL, a suitable name will be generated automatically.
 *
 * Returns 0 on success, or an error code on failure.
 */
int hprofDumpHeap(const char* fileName)
{
    int result;

    dvmLockMutex(&gDvm.gcHeapLock);

    gDvm.gcHeap->hprofDumpOnGc = true;
    gDvm.gcHeap->hprofFileName = fileName;
    dvmCollectGarbageInternal(false);
    result = gDvm.gcHeap->hprofResult;

    dvmUnlockMutex(&gDvm.gcHeapLock);

    return result;
}
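
/* Illustrative usage sketch (only a result of 0 indicates success):
 *
 *     int res = hprofDumpHeap(NULL);  // auto-names /data/misc/heap-dump-*.hprof
 *     if (res != 0)
 *         LOGE("hprof heap dump failed (%d)\n", res);
 */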

void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber)
{
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofSetGcScanState(gDvm.gcHeap->hprofContext, state,
                threadSerialNumber);
    }
}
#endif
