/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/mspace.h>
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>

#define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"

// TODO: find a real header file for these.
extern "C" int dlmalloc_trim(size_t);
extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
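
/* Worked out, these thresholds are: CONCURRENT_START = 128 KiB and
 * CONCURRENT_MIN_FREE = 256 KiB.  So a concurrent collection is scheduled
 * once the active heap comes within 128 KiB of its allowed footprint, and
 * the next GC is only allowed to be concurrent if the last one left at
 * least 256 KiB free.  HEAP_IDEAL_FREE is 2 MiB and HEAP_MIN_FREE is
 * 512 KiB; see getUtilizationTarget() below for how they bound growth.
 */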

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

struct Heap {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t maximumSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;
};

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t maximumSize;

    /*
     * The largest size we permit the heap to grow.  This value allows
     * the user to limit the heap growth below the maximum size.  This
     * is a work around until we can dynamically set the maximum size.
     * This value can range between the starting size and the maximum
     * size but should never be set below the current footprint of the
     * heap.
     */
    size_t growthLimit;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_max_allowed_footprint(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;

    if (ptr != NULL) {
        for (size_t i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;

static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
{
    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     *
     * We start off reserving startSize / 2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zu", startSize);
    errno = 0;

    mspace msp = create_contiguous_mspace_with_base(startSize/2,
            maximumSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
            startSize/2, maximumSize, strerror(errno));
    }

    return msp;
}
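
/* A sketch of the sizing behavior, with hypothetical numbers: a call
 * such as createMspace(base, 4 MiB, 16 MiB) creates an mspace with an
 * initial reservation of 2 MiB (startSize / 2), a hard ceiling of
 * 16 MiB, and a max_allowed_footprint of 4 MiB, so the heap cannot
 * grow past startSize until setIdealFootprint() or heapAllocAndGrow()
 * raises the limit explicitly.
 */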

/*
 * Add the initial heap.  Returns false if the initial heap was
 * already added to the heap source.
 */
static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
{
    assert(hs != NULL);
    assert(msp != NULL);
    if (hs->numHeaps != 0) {
        return false;
    }
    hs->heaps[0].msp = msp;
    hs->heaps[0].maximumSize = maximumSize;
    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
    hs->heaps[0].base = hs->heapBase;
    hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
    hs->numHeaps = 1;
    return true;
}

/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }

    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace msp = hs->heaps[0].msp;
    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
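
/* After a successful addNewHeap(), the single memory reservation is
 * split roughly like this (addresses increase to the right):
 *
 *   heapBase                base (page after old sbrk0)   heapBase+maximumSize
 *   | heaps[1] (old, frozen) | heaps[0] (new, active) ... |
 *
 * The old heap keeps its current footprint forever; all new objects
 * come from the new heaps[0].  This is how the zygote's heap is kept
 * untouched after fork().
 */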

/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (!gHs->gcThreadShutdown) {
        dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        dvmLockHeap();
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmCollectGarbageInternal(GC_CONCURRENT);
        dvmChangeStatus(NULL, THREAD_VMWAIT);
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}

static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}

/*
 * Create a stack big enough for the worst possible case, where the
 * heap is perfectly full of the smallest object.
 * TODO: be better about memory usage; use a smaller stack with
 *       overflow detection and recovery.
 */
static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
{
    const char *name = "dalvik-mark-stack";
    void *addr;

    assert(stack != NULL);
    stack->length = maximumSize * sizeof(Object*) /
        (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
    if (addr == NULL) {
        return false;
    }
    stack->base = (const Object **)addr;
    stack->limit = (const Object **)((char *)addr + stack->length);
    stack->top = NULL;
    madvise(stack->base, stack->length, MADV_DONTNEED);
    return true;
}

static void freeMarkStack(GcMarkStack *stack)
{
    assert(stack != NULL);
    munmap(stack->base, stack->length);
    memset(stack, 0, sizeof(*stack));
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        LOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, startSize, maximumSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)malloc(sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = (HeapSource *)malloc(sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        free(gcHeap);
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        LOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}
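
/* A typical startup sequence, with hypothetical sizes of the kind that
 * would come from VM options like -Xms4m -Xmx16m:
 *
 *   GcHeap *gcHeap = dvmHeapSourceStartup(4*1024*1024,   // startSize
 *                                         16*1024*1024,  // maximumSize
 *                                         16*1024*1024); // growthLimit
 *
 * This reserves 16 MiB of address space with PROT_NONE, creates the
 * initial mspace inside it, and sets up the live/mark bitmaps and the
 * mark stack covering the whole reservation.
 */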

bool dvmHeapSourceStartupAfterZygote()
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool dvmHeapSourceStartupBeforeFork()
{
    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGV("Splitting out new zygote heap");
        gDvm.newZygoteHeapAllocated = true;
        dvmClearCardTable();
        return addNewHeap(gHs);
    }
    return true;
}

void dvmHeapSourceThreadShutdown()
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    assert(gcHeap != NULL);
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs = (*gcHeap)->heapSource;
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);
        freeMarkStack(&(*gcHeap)->markContext.stack);
        munmap(hs->heapBase, hs->heapLength);
        free(hs);
        gHs = NULL;
        free(*gcHeap);
        *gcHeap = NULL;
    }
}

/*
 * Gets the beginning of the allocation for the HeapSource.
 */
void *dvmHeapSourceGetBase()
{
    return gHs->heapBase;
}

/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}

void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, uintptr_t *limit,
                             size_t numHeaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(numHeaps <= hs->numHeaps);
    for (size_t i = 0; i < numHeaps; ++i) {
        base[i] = (uintptr_t)hs->heaps[i].base;
        if (max != NULL) {
            max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
        }
        if (limit != NULL) {
            limit[i] = (uintptr_t)hs->heaps[i].limit;
        }
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}

void dvmHeapSourceSwapBitmaps()
{
    HeapBitmap tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}

void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    /*
     * Copy the contents of the live bit vector for immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (size_t i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the number of words to copy in the bitmap. */
            size_t index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            char *src = (char *)(gHs->liveBits.bits + index);
            char *dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            size_t length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
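
/* Note the handshake with gcDaemonThread(): the daemon blocks in
 * dvmWaitCond() on gcThreadCond, and the allocator above signals that
 * condition whenever an allocation pushes bytesAllocated past
 * concurrentStartBytes, i.e. once the active heap is within
 * CONCURRENT_START (128 KiB) of its allowed footprint.
 */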

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    size_t max = heap->maximumSize;

    mspace_set_max_allowed_footprint(heap->msp, max);
    void* ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_max_allowed_footprint(heap->msp,
                                     mspace_footprint(heap->msp));
    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    size_t oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}
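
/* To summarize the fallback order above: (1) try a plain allocation;
 * (2) if soft-limited, lift the soft limit and retry, which "grows"
 * the heap only up to memory it was already allowed to use; (3) lift
 * the hard footprint limit to the heap's absolute maximum and retry.
 * Only on total failure is the original ideal size restored.
 */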

/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage. The list must contain addresses all in
 * the same mspace, and must be in increasing order. This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    Heap* heap = ptr2heap(gHs, *ptrs);
    size_t numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration around the loop below, calling
            // countFree() on ptrs[0] and initializing merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], &numBytes);
            void *merged = ptrs[0];
            for (size_t i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}
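
/* For example, if ptrs[] contains four dead objects A < B < C < D where
 * A, B, and C are contiguous chunks but D is not adjacent to C, the loop
 * merges A+B, then (A+B)+C, fails to merge with D, frees the combined
 * A..C chunk with one mspace_free() call, and finally frees D: two
 * freelist operations instead of four.
 */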

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr));
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

bool dvmIsZygoteObject(const Object* obj)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    if (dvmHeapSourceContains(obj) && hs->sawZygote) {
        Heap *heap = ptr2heap(hs, obj);
        if (heap != NULL) {
            /* If the object is not in the active heap, we assume that
             * it was allocated as part of zygote.
             */
            return heap != hs->heaps;
        }
    }
    /* The pointer is outside of any known heap, or we are not
     * running in zygote mode.
     */
    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t dvmHeapSourceChunkSize(const void *ptr)
{
    HS_BOILERPLATE();

    Heap* heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

static size_t getMaximumSize(const HeapSource *hs)
{
    return hs->growthLimit;
}

/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();
    return getMaximumSize(gHs);
}

/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    dvmUnlockHeap();
}

/*
 * Return the real bytes used by old heaps plus the soft usage of the
 * current heap.  When a soft limit is in effect, this is effectively
 * what it's compared against (though, in practice, it only looks at
 * the current heap).
 */
static size_t getSoftFootprint(bool includeActive)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t ret = oldHeapOverhead(hs, false);
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void setIdealFootprint(size_t max)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t maximumSize = getMaximumSize(hs);
    if (max > maximumSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(maximumSize));
        max = maximumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    size_t targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
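
/* Worked example with the default targetUtilization of 512/1024 (0.5):
 * a live set of 6 MiB gives targetSize = (6 MiB / 512) * 1024 = 12 MiB,
 * which is then capped to liveSize + HEAP_IDEAL_FREE = 8 MiB.  A tiny
 * live set of 100 KiB would instead be raised to the floor of
 * liveSize + HEAP_MIN_FREE = 612 KiB.
 */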

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    size_t currentHeapUsed = heap->bytesAllocated;
    size_t targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    size_t overhead = getSoftFootprint(false);
    setIdealFootprint(targetHeapSize + overhead);

    size_t freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
}
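
/* Continuing the example above: with an 8 MiB allowed footprint, the
 * concurrent trigger becomes 8 MiB - CONCURRENT_START, so the daemon is
 * signaled once allocation comes within 128 KiB of the limit.  (Note
 * that getAllocLimit() returns the total allowed footprint, not a count
 * of free bytes, despite the local variable name freeBytes.)
 */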

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}
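
/* Example of the alignment, assuming 4 KiB pages: for a free chunk
 * spanning 0x40001200..0x40003100, start rounds up to 0x40002000 and
 * end rounds down to 0x40003000, so exactly one whole page is returned
 * to the kernel via MADV_DONTNEED; the partial pages at either end
 * stay resident.
 */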

/*
 * Return unused memory to the system if possible.
 */
void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HS_BOILERPLATE();

    assert(arrayLen >= gHs->numHeaps);
    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                       const void *userptr, size_t userlen,
                                       void *arg),
                       void *arg)
{
    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    HeapSource *hs = gHs;
    for (size_t i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t dvmHeapSourceGetNumHeaps()
{
    HS_BOILERPLATE();

    return gHs->numHeaps;
}

void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}