/* HeapSource.cpp, revision fe9052edaf6bebbccaac5a9fb607012778d0dd74 */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/mspace.h>
#include <limits.h>    // for UINT_MAX, used by the SIZE_MAX fallback below
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>

#define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"

// TODO: find a real header file for these.
extern "C" int dlmalloc_trim(size_t);
extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);
static void trimHeaps();

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)

/* Number of seconds to wait after a GC before performing a heap trim
 * operation to reclaim unused pages.
 */
#define HEAP_TRIM_IDLE_TIME_SECONDS 5

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
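
/* Worked example of how these tunables interact (values follow directly
 * from the definitions above): HEAP_IDEAL_FREE is 2 MiB, so HEAP_MIN_FREE
 * is 512 KiB, CONCURRENT_START is 128 KiB, and CONCURRENT_MIN_FREE is
 * 256 KiB.  After a GC that leaves 1 MiB free, 1 MiB >= CONCURRENT_MIN_FREE,
 * so the next collection may run concurrently, and it will be kicked off
 * once free memory drops below CONCURRENT_START; see
 * dvmHeapSourceGrowForUtilization() and dvmHeapSourceAlloc() below.
 */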

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

struct Heap {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t maximumSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;
};

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest size that the heap source as a whole is allowed
     * to grow to.
     */
    size_t maximumSize;

    /*
     * The largest size we permit the heap to grow to.  This value
     * allows the user to limit heap growth below the maximum size.
     * This is a workaround until we can dynamically set the maximum
     * size.  This value can range between the starting size and the
     * maximum size but should never be set below the current
     * footprint of the heap.
     */
    size_t growthLimit;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
    bool gcThreadTrimNeeded;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_max_allowed_footprint(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;

    /* Skip the active heap at index 0 unless asked to include it. */
    for (size_t i = includeActive ? 0 : 1; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;

    if (ptr != NULL) {
        for (size_t i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;

static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
{
    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     *
     * We start off reserving startSize / 2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zu", startSize);
    errno = 0;

    mspace msp = create_contiguous_mspace_with_base(startSize/2,
            maximumSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
            startSize/2, maximumSize, strerror(errno));
    }

    return msp;
}
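
/* Illustrative sketch (not code from the VM; 4 MiB / 16 MiB stand in for
 * real parameters): with startSize = 4 MiB and maximumSize = 16 MiB,
 * createMspace() asks dlmalloc to back only 2 MiB (startSize/2) of the
 * reservation up front, while the mspace records 16 MiB as the hard
 * ceiling.  The max-allowed footprint is then pinned at 4 MiB, so the heap
 * cannot creep past startSize until the footprint logic below explicitly
 * raises the limit.
 */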

/*
 * Add the initial heap.  Returns false if the initial heap was
 * already added to the heap source.
 */
static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
{
    assert(hs != NULL);
    assert(msp != NULL);
    if (hs->numHeaps != 0) {
        return false;
    }
    hs->heaps[0].msp = msp;
    hs->heaps[0].maximumSize = maximumSize;
    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
    hs->heaps[0].base = hs->heapBase;
    hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
    hs->numHeaps = 1;
    return true;
}

/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }

    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace msp = hs->heaps[0].msp;
    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
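
/* Sketch of the reservation after a successful addNewHeap() call (layout
 * derived from the code above; widths not to scale):
 *
 *   hs->heapBase       base (page-aligned old sbrk0)    hs->heapBase + length
 *   |--- old heap ------|--- new active heap[0] ---....unused reservation...|
 *    now heaps[1],          grows up to
 *    frozen at its          growthLimit - overhead
 *    current footprint
 *
 * All future allocation happens in the new heap at heaps[0]; the old heap
 * keeps its current footprint as its maximum and can no longer grow.
 */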

/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.  Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (!gHs->gcThreadShutdown) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_SECONDS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled.  Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}

static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}

/*
 * Create a stack big enough for the worst possible case, where the
 * heap is perfectly full of the smallest object.
 * TODO: be better about memory usage; use a smaller stack with
 *       overflow detection and recovery.
 */
static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
{
    const char *name = "dalvik-mark-stack";
    void *addr;

    assert(stack != NULL);
    stack->length = maximumSize * sizeof(Object*) /
        (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
    if (addr == NULL) {
        return false;
    }
    stack->base = (const Object **)addr;
    stack->limit = (const Object **)((char *)addr + stack->length);
    stack->top = NULL;
    madvise(stack->base, stack->length, MADV_DONTNEED);
    return true;
}
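
/* Worked example of the sizing above (numbers are illustrative, not from
 * the source): on a 32-bit build with sizeof(Object*) == 4, sizeof(Object)
 * == 8, and HEAP_SOURCE_CHUNK_OVERHEAD == 8, a 16 MiB heap yields
 * 16 MiB * 4 / 16 = 4 MiB of stack, one slot for every smallest-possible
 * object the heap could hold.  The madvise(MADV_DONTNEED) call keeps those
 * pages unbacked until the mark phase actually touches them.
 */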

static void freeMarkStack(GcMarkStack *stack)
{
    assert(stack != NULL);
    munmap(stack->base, stack->length);
    memset(stack, 0, sizeof(*stack));
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        LOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, startSize, maximumSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)malloc(sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = (HeapSource *)malloc(sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        free(gcHeap);
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        LOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}
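
/* Illustrative startup sequence (a sketch, not code from the VM; the
 * 4/16/16 MiB figures stand in for whatever -Xms/-Xmx produce):
 *
 *   GcHeap *gcHeap = dvmHeapSourceStartup(4*1024*1024,   // startSize
 *                                         16*1024*1024,  // maximumSize
 *                                         16*1024*1024); // growthLimit
 *   if (gcHeap == NULL) { ... abort VM startup ... }
 *
 * startSize <= growthLimit <= maximumSize must hold or the call fails.
 */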

bool dvmHeapSourceStartupAfterZygote()
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool dvmHeapSourceStartupBeforeFork()
{
    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGV("Splitting out new zygote heap");
        gDvm.newZygoteHeapAllocated = true;
        dvmClearCardTable();
        return addNewHeap(gHs);
    }
    return true;
}

void dvmHeapSourceThreadShutdown()
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    assert(gcHeap != NULL);
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs = (*gcHeap)->heapSource;
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);
        freeMarkStack(&(*gcHeap)->markContext.stack);
        munmap(hs->heapBase, hs->heapLength);
        free(hs);
        gHs = NULL;
        free(*gcHeap);
        *gcHeap = NULL;
    }
}

/*
 * Gets the beginning of the allocation for the HeapSource.
 */
void *dvmHeapSourceGetBase()
{
    return gHs->heapBase;
}

/*
 * Returns the requested value summed over all heaps.  If perHeapStats
 * is non-NULL, also fills it in with the corresponding per-heap values.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
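
/* Usage sketch (illustrative only; real callers live elsewhere in the VM):
 *
 *   size_t perHeap[HEAP_SOURCE_MAX_HEAP_COUNT];
 *   dvmLockHeap();
 *   size_t footprint = dvmHeapSourceGetValue(HS_FOOTPRINT, perHeap,
 *                                            dvmHeapSourceGetNumHeaps());
 *   dvmUnlockHeap();
 *
 * footprint is the sum over all heaps; perHeap[i] holds heap i's share.
 */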

void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, uintptr_t *limit,
                             size_t numHeaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(numHeaps <= hs->numHeaps);
    for (size_t i = 0; i < numHeaps; ++i) {
        base[i] = (uintptr_t)hs->heaps[i].base;
        if (max != NULL) {
            max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
        }
        if (limit != NULL) {
            limit[i] = (uintptr_t)hs->heaps[i].limit;
        }
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}

void dvmHeapSourceSwapBitmaps()
{
    HeapBitmap tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}
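
/* Note on the swap (a reading aid, not new behavior): after a full mark,
 * every reachable object has its bit set in markBits, and the sweep frees
 * exactly the objects set in liveBits but not markBits.  Swapping the two
 * structs then makes markBits the new live bitmap without copying any
 * bitmap words; the old live bitmap is recycled as the next cycle's mark
 * bitmap and can be cleared via dvmHeapSourceZeroMarkBitmap() below.
 */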

void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    /*
     * Copy the contents of the live bit vector for immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (size_t i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the starting word index within the bitmaps. */
            size_t index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting addresses within the live and mark bits. */
            char *src = (char *)(gHs->liveBits.bits + index);
            char *dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            size_t length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
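
/* Worked example of the concurrent-GC trigger (illustrative numbers): if
 * dvmHeapSourceGrowForUtilization() last computed a 1 MiB allocation limit,
 * concurrentStartBytes is 1 MiB - CONCURRENT_START = 896 KiB.  The first
 * allocation that pushes bytesAllocated past 896 KiB signals gcThreadCond,
 * waking the daemon to collect while roughly CONCURRENT_START (128 KiB) of
 * headroom remains for the mutator threads.
 */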

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    size_t max = heap->maximumSize;

    mspace_set_max_allowed_footprint(heap->msp, max);
    void* ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_max_allowed_footprint(heap->msp,
                                     mspace_footprint(heap->msp));
    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    size_t oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}
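
/* Summary of the fallback ladder above (a reading aid, not new behavior):
 *   1. Plain dvmHeapSourceAlloc(): respects the soft limit.
 *   2. If soft-limited, drop the soft limit and retry: uses memory the
 *      mspace may already have, without growing the footprint.
 *   3. heapAllocAndGrow(): temporarily raise the allowed footprint to the
 *      heap's hard maximum, allocate, then clamp back to the new footprint.
 * On total failure the original ideal size (and soft limit) is restored.
 */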

/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage. The list must contain addresses all in
 * the same mspace, and must be in increasing order. This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    Heap* heap = ptr2heap(gHs, *ptrs);
    size_t numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration of the loop below: call
            // countFree() on ptrs[0] and initialize merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], &numBytes);
            void *merged = ptrs[0];
            for (size_t i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}
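
/* Example of the merge-run behavior (illustrative addresses): suppose
 * ptrs = {A, B, C, X}, where A, B, and C are adjacent chunks and X is not.
 * The loop merges B into A, then C into the merged A..C chunk; merging X
 * fails, so the A..C run is released with one mspace_free() call and X
 * starts a new run, freed after the loop.  Four dead objects cost only two
 * calls into dlmalloc's freelist machinery.
 */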

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr);
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

bool dvmIsZygoteObject(const Object* obj)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    if (dvmHeapSourceContains(obj) && hs->sawZygote) {
        Heap *heap = ptr2heap(hs, obj);
        if (heap != NULL) {
            /* If the object is not in the active heap, we assume that
             * it was allocated as part of zygote.
             */
            return heap != hs->heaps;
        }
    }
    /* The pointer is outside of any known heap, or we are not
     * running in zygote mode.
     */
    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t dvmHeapSourceChunkSize(const void *ptr)
{
    HS_BOILERPLATE();

    Heap* heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

static size_t getMaximumSize(const HeapSource *hs)
{
    return hs->growthLimit;
}

/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();
    return getMaximumSize(gHs);
}

/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
    dvmUnlockHeap();
}

/*
 * Return the real bytes used by old heaps plus the soft usage of the
 * current heap.  When a soft limit is in effect, this is effectively
 * what it's compared against (though, in practice, it only looks at
 * the current heap).
 */
static size_t getSoftFootprint(bool includeActive)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t ret = oldHeapOverhead(hs, false);
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void setIdealFootprint(size_t max)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t maximumSize = getMaximumSize(hs);
    if (max > maximumSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %zd.%03zdMB",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(maximumSize));
        max = maximumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    size_t targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
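
/* Worked example (values follow from the defines at the top of the file):
 * with the default targetUtilization of 512/1024 (50%) and a 4 MiB live
 * set, targetSize = (4 MiB / 512) * 1024 = 8 MiB.  That exceeds
 * liveSize + HEAP_IDEAL_FREE = 4 MiB + 2 MiB, so the result is clamped
 * down to 6 MiB.  A tiny 100 KiB live set would instead be raised to
 * 100 KiB + HEAP_MIN_FREE = 612 KiB.
 */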

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    size_t currentHeapUsed = heap->bytesAllocated;
    size_t targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    size_t overhead = getSoftFootprint(false);
    setIdealFootprint(targetHeapSize + overhead);

    size_t freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}
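
/* Alignment example (page size 4096; addresses are illustrative): a free
 * run from 0x40001100 to 0x40004200 rounds the start up to 0x40002000 and
 * the end down to 0x40004000, so madvise() releases exactly two whole
 * pages.  Partial pages at either end stay resident, since live data may
 * share them.
 */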

/*
 * Return unused memory to the system if possible.
 */
static void trimHeaps()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        mspace_walk_free_pages(heap->msp, releasePagesInRange, &heapBytes);
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                       const void *userptr, size_t userlen,
                                       void *arg),
                       void *arg)
{
    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    HeapSource *hs = gHs;
    for (size_t i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t dvmHeapSourceGetNumHeaps()
{
    HS_BOILERPLATE();

    return gHs->numHeaps;
}

void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}