/* HeapSource.cpp, revision fd31cc071c83bdbe45e6f9db41fd76375e7ac310 */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/mspace.h>
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>
#include <limits.h>    // for UINT_MAX, used by the SIZE_MAX fallback below

/* Older C++ toolchains only expose SIZE_MAX from <stdint.h> when
 * __STDC_LIMIT_MACROS is defined; fall back to UINT_MAX (correct for a
 * 32-bit size_t) when it is missing, and don't redefine it otherwise.
 */
#ifndef SIZE_MAX
#define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h
#endif

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"

// TODO: find a real header file for these.
extern "C" int dlmalloc_trim(size_t);
extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
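
/* A worked example of these constants (illustrative numbers): the target
 * utilization is expressed as a fraction of HEAP_UTILIZATION_MAX, so
 * DEFAULT_HEAP_UTILIZATION of 512/1024 asks for a heap that is 50% live
 * data after a GC, subject to the HEAP_IDEAL_FREE (2 MiB) and
 * HEAP_MIN_FREE (512 KiB) clamps applied in getUtilizationTarget().
 */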

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

struct Heap {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t maximumSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;
};

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest size that the heap source as a whole is allowed to
     * grow to.
     */
    size_t maximumSize;

    /*
     * The largest size to which we permit the heap to grow.  This value
     * allows the user to limit the heap growth below the maximum size.
     * This is a workaround until we can dynamically set the maximum size.
     * This value can range between the starting size and the maximum
     * size but should never be set below the current footprint of the
     * heap.
     */
    size_t growthLimit;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_max_allowed_footprint(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;

    /* Skip the active heap at index 0 unless asked to include it. */
    for (size_t i = includeActive ? 0 : 1; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;

    if (ptr != NULL) {
        for (size_t i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;

static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
{
    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     *
     * We start off reserving startSize / 2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zu", startSize);
    errno = 0;

    mspace msp = create_contiguous_mspace_with_base(startSize/2,
            maximumSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
            startSize/2, maximumSize, strerror(errno));
    }

    return msp;
}
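
/* Sizing example (illustrative numbers): createMspace(base, 4 MiB, 16 MiB)
 * creates the mspace with an initial 2 MiB footprint and then caps its
 * growth at 4 MiB; later calls to mspace_set_max_allowed_footprint() --
 * via setSoftLimit() or heapAllocAndGrow() -- raise or lower that cap.
 */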

/*
 * Add the initial heap.  Returns false if the initial heap was
 * already added to the heap source.
 */
static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
{
    assert(hs != NULL);
    assert(msp != NULL);
    if (hs->numHeaps != 0) {
        return false;
    }
    hs->heaps[0].msp = msp;
    hs->heaps[0].maximumSize = maximumSize;
    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
    hs->heaps[0].base = hs->heapBase;
    hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
    hs->numHeaps = 1;
    return true;
}

/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, (size_t)HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }

    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace msp = hs->heaps[0].msp;
    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
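
/* After a successful split, the single reservation is laid out roughly
 * like this (sketch, not to scale):
 *
 *   heapBase                      base                heapBase + heapLength
 *   | old (zygote) heap, frozen   | new active heap | unused reservation |
 *
 * heaps[0] is always the new active heap; the old heap keeps its objects
 * but its footprint cap prevents any further growth.
 */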

/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (!gHs->gcThreadShutdown) {
        dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled.  Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            dvmCollectGarbageInternal(GC_CONCURRENT);
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}

static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}

/*
 * Create a stack big enough for the worst possible case, where the
 * heap is perfectly full of the smallest object.
 * TODO: be better about memory usage; use a smaller stack with
 *       overflow detection and recovery.
 */
static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
{
    const char *name = "dalvik-mark-stack";
    void *addr;

    assert(stack != NULL);
    stack->length = maximumSize * sizeof(Object*) /
        (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
    if (addr == NULL) {
        return false;
    }
    stack->base = (const Object **)addr;
    stack->limit = (const Object **)((char *)addr + stack->length);
    stack->top = NULL;
    madvise(stack->base, stack->length, MADV_DONTNEED);
    return true;
}
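
/* Sizing example (illustrative; the real values depend on the target's
 * sizeof(Object) and HEAP_SOURCE_CHUNK_OVERHEAD): with 4-byte pointers
 * and a 16-byte minimum chunk, a 16 MiB heap holds at most 1M objects,
 * so the stack is sized 16 MiB * 4 / 16 = 4 MiB, one slot per object.
 */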

static void freeMarkStack(GcMarkStack *stack)
{
    assert(stack != NULL);
    munmap(stack->base, stack->length);
    memset(stack, 0, sizeof(*stack));
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap = NULL;
    HeapSource *hs = NULL;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        LOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, startSize, maximumSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)malloc(sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = (HeapSource *)malloc(sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        LOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    /* gcHeap and hs start out NULL and free(NULL) is a no-op, so this
     * single exit path is safe for every failure above; it also plugs
     * the leak where descriptors allocated before a later failure were
     * never released.
     */
    free(hs);
    free(gcHeap);
    munmap(base, length);
    return NULL;
}

bool dvmHeapSourceStartupAfterZygote()
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool dvmHeapSourceStartupBeforeFork()
{
    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGV("Splitting out new zygote heap");
        gDvm.newZygoteHeapAllocated = true;
        dvmClearCardTable();
        return addNewHeap(gHs);
    }
    return true;
}

void dvmHeapSourceThreadShutdown()
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    assert(gcHeap != NULL);
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs = (*gcHeap)->heapSource;
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);
        freeMarkStack(&(*gcHeap)->markContext.stack);
        munmap(hs->heapBase, hs->heapLength);
        free(hs);
        gHs = NULL;
        free(*gcHeap);
        *gcHeap = NULL;
    }
}

/*
 * Gets the beginning of the allocation for the HeapSource.
 */
void *dvmHeapSourceGetBase()
{
    return gHs->heapBase;
}

/*
 * Returns the requested value.  If the per-heap stats are requested,
 * fill them in as well.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
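
/* Example use (illustrative), gathering per-heap footprints while the
 * heap lock is held:
 *
 *   size_t perHeap[HEAP_SOURCE_MAX_HEAP_COUNT];
 *   size_t total = dvmHeapSourceGetValue(HS_FOOTPRINT, perHeap,
 *                                        HEAP_SOURCE_MAX_HEAP_COUNT);
 */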

void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, uintptr_t *limit,
                             size_t numHeaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(numHeaps <= hs->numHeaps);
    for (size_t i = 0; i < numHeaps; ++i) {
        base[i] = (uintptr_t)hs->heaps[i].base;
        if (max != NULL) {
            max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
        }
        if (limit != NULL) {
            limit[i] = (uintptr_t)hs->heaps[i].limit;
        }
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}

void dvmHeapSourceSwapBitmaps()
{
    HeapBitmap tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}

void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    /*
     * Copy the contents of the live bit vector for the immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (size_t i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the number of words to copy in the bitmap. */
            size_t index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            char *src = (char *)(gHs->liveBits.bits + index);
            char *dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            size_t length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}
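
/* A sketch of the arithmetic, assuming Dalvik's usual 8-byte object
 * alignment and 32-bit bitmap words (one bitmap word then covers 256
 * heap bytes): an immune heap spanning [base, base + 1 MiB) maps to
 * 4096 bitmap words, i.e. a single 16 KiB memcpy instead of a mark
 * pass over every object in that range.
 */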

/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
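
/* The concurrent-GC handshake, end to end (sketch): an allocation here
 * pushes bytesAllocated past concurrentStartBytes and signals
 * gcThreadCond; gcDaemonThread() wakes under gcThreadMutex, takes the
 * heap lock, re-checks gcHeap->gcRunning, and only then runs
 * dvmCollectGarbageInternal(GC_CONCURRENT).
 */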

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    size_t max = heap->maximumSize;

    mspace_set_max_allowed_footprint(heap->msp, max);
    void* ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_max_allowed_footprint(heap->msp,
                                     mspace_footprint(heap->msp));
    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    size_t oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked; fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage.  The list must contain addresses all in
 * the same mspace, and must be in increasing order.  This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    Heap* heap = ptr2heap(gHs, *ptrs);
    size_t numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration around the loop below,
            // counting ptrs[0] as freed and initializing merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], &numBytes);
            void *merged = ptrs[0];
            for (size_t i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}
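
/* Worked example (hypothetical addresses): for ptrs = {a, b, c} where b
 * immediately follows a but c does not follow b, the loop merges a and b
 * into one chunk, frees it with a single mspace_free(), then starts a
 * new run at c -- two freelist operations instead of three.
 */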

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr));
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

bool dvmIsZygoteObject(const Object* obj)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    if (dvmHeapSourceContains(obj) && hs->sawZygote) {
        Heap *heap = ptr2heap(hs, obj);
        if (heap != NULL) {
            /* If the object is not in the active heap, we assume that
             * it was allocated as part of zygote.
             */
            return heap != hs->heaps;
        }
    }
    /* The pointer is outside of any known heap, or we are not
     * running in zygote mode.
     */
    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t dvmHeapSourceChunkSize(const void *ptr)
{
    HS_BOILERPLATE();

    Heap* heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

static size_t getMaximumSize(const HeapSource *hs)
{
    return hs->growthLimit;
}

/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();
    return getMaximumSize(gHs);
}

/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    /* Keep the exclusive upper bound in sync with the new maximum so
     * ptr2heap() and dvmHeapSourceGetRegions() see the grown range.
     */
    gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
    dvmUnlockHeap();
}

/*
 * Return the real bytes used by old heaps plus the soft usage of the
 * current heap.  When a soft limit is in effect, this is effectively
 * what it's compared against (though, in practice, it only looks at
 * the current heap).
 */
static size_t getSoftFootprint(bool includeActive)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t ret = oldHeapOverhead(hs, false);
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}
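
/* Example (illustrative numbers): if the active mspace's footprint is
 * already 3 MiB and the caller asks for a 2 MiB limit, the footprint is
 * frozen at 3 MiB and softLimit is set to 2 MiB, so dvmHeapSourceAlloc()
 * starts failing and forces a GC; a request of 4 MiB instead simply
 * raises the allowed footprint and clears the soft limit.
 */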

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void setIdealFootprint(size_t max)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t maximumSize = getMaximumSize(hs);
    if (max > maximumSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %zd.%03zdMB",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(maximumSize));
        max = maximumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    size_t targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
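
/* Worked example (illustrative numbers): with targetUtilization = 512
 * and an 8 MiB live set, targetSize = (8 MiB / 512) * 1024 = 16 MiB,
 * which the HEAP_IDEAL_FREE cap pulls down to 8 MiB + 2 MiB = 10 MiB;
 * a tiny 128 KiB live set is instead raised to 128 KiB + HEAP_MIN_FREE
 * = 640 KiB.
 */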

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    size_t currentHeapUsed = heap->bytesAllocated;
    size_t targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    size_t overhead = getSoftFootprint(false);
    setIdealFootprint(targetHeapSize + overhead);

    size_t freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
}
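
/* Example (illustrative): if the post-GC allocation limit is 1 MiB,
 * concurrentStartBytes becomes 1 MiB - CONCURRENT_START = 896 KiB, so a
 * concurrent GC is kicked off once less than 128 KiB of headroom
 * remains; with a limit under CONCURRENT_MIN_FREE (256 KiB) the next GC
 * simply won't be concurrent.
 */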

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}
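
/* Alignment example (assuming 4 KiB pages, hypothetical addresses): a
 * free chunk spanning [0x10100, 0x13f00) is shrunk inward to
 * [0x11000, 0x13000), so only the 8 KiB of whole pages inside it are
 * madvised away; a chunk smaller than a page yields start >= end and
 * releases nothing.
 */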

/*
 * Return unused memory to the system if possible.
 */
void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HS_BOILERPLATE();

    assert(arrayLen >= gHs->numHeaps);
    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                       const void *userptr, size_t userlen,
                                       void *arg),
                       void *arg)
{
    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    HeapSource *hs = gHs;
    for (size_t i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t dvmHeapSourceGetNumHeaps()
{
    HS_BOILERPLATE();

    return gHs->numHeaps;
}

void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}
1357