HeapSource.cpp revision bba37bd191843ef29ef9c7a8839e98b73debfffa
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>

#define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h

#include "Dalvik.h"
#include "alloc/DlMalloc.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"

static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);
static void trimHeaps();

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
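
/* The target utilization is stored as a fixed-point fraction of
 * HEAP_UTILIZATION_MAX, so DEFAULT_HEAP_UTILIZATION (512/1024) asks for a
 * heap roughly twice the size of the live set after a GC.  The free-space
 * component implied by that ratio is clamped to the
 * HEAP_MIN_FREE..HEAP_IDEAL_FREE range in getUtilizationTarget() below.
 */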

/* How long to wait after a GC before performing a heap trim
 * operation to reclaim unused pages.
 */
#define HEAP_TRIM_IDLE_TIME_MS (5 * 1000)

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

struct Heap {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t maximumSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;

    /*
     * If the heap has an mspace, the current high water mark in
     * allocations requested via dvmHeapSourceMorecore.
     */
    char *brk;
};
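
/* Within a Heap the three pointers always satisfy base <= brk <= limit:
 * pages below brk are mapped read/write and owned by the mspace, while
 * pages in [brk, limit) stay PROT_NONE until dvmHeapSourceMorecore()
 * extends the break.  Every Heap carves its range out of the single
 * reservation described by HeapSource::heapBase/heapLength below.
 */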

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t maximumSize;

    /*
     * The largest size we permit the heap to grow.  This value allows
     * the user to limit the heap growth below the maximum size.  This
     * is a work around until we can dynamically set the maximum size.
     * This value can range between the starting size and the maximum
     * size but should never be set below the current footprint of the
     * heap.
     */
    size_t growthLimit;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
    bool gcThreadTrimNeeded;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_footprint_limit(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;

    if (ptr != NULL) {
        for (size_t i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    size_t delta = mspace_usable_size(ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;

static mspace createMspace(void* begin, size_t morecoreStart, size_t startingSize)
{
    // Clear errno to allow strerror on error.
    errno = 0;
    // Allow access to initial pages that will hold the mspace.
    mprotect(begin, morecoreStart, PROT_READ | PROT_WRITE);
    // Create mspace using our backing storage starting at begin and with a footprint of
    // morecoreStart. Don't use an internal dlmalloc lock. When morecoreStart bytes of memory are
    // exhausted morecore will be called.
    mspace msp = create_mspace_with_base(begin, morecoreStart, false /*locked*/);
    if (msp != NULL) {
        // Do not allow morecore requests to succeed beyond the starting size of the heap.
        mspace_set_footprint_limit(msp, startingSize);
    } else {
        ALOGE("create_mspace_with_base failed %s", strerror(errno));
    }
    return msp;
}

/*
 * Service request from DlMalloc to increase heap size.
 */
void* dvmHeapSourceMorecore(void* mspace, intptr_t increment)
{
    Heap* heap = NULL;
    for (size_t i = 0; i < gHs->numHeaps; i++) {
        if (gHs->heaps[i].msp == mspace) {
            heap = &gHs->heaps[i];
            break;
        }
    }
    if (heap == NULL) {
        ALOGE("Failed to find heap for mspace %p", mspace);
        dvmAbort();
    }
    char* original_brk = heap->brk;
    if (increment != 0) {
        char* new_brk = original_brk + increment;
        if (increment > 0) {
            // Should never be asked to increase the allocation beyond the capacity of the space.
            // Enforced by mspace_set_footprint_limit.
            assert(new_brk <= heap->limit);
            mprotect(original_brk, increment, PROT_READ | PROT_WRITE);
        } else {
            // Should never be asked for negative footprint (ie before base).
            assert(original_brk + increment > heap->base);
            // Advise we don't need the pages and protect them.
            size_t size = -increment;
            madvise(new_brk, size, MADV_DONTNEED);
            mprotect(new_brk, size, PROT_NONE);
        }
        // Update brk.
        heap->brk = new_brk;
    }
    return original_brk;
}
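
/* For example, a call with increment == -4096 (one page, assuming a 4KiB
 * system page size) moves brk down by one page, madvises that page away and
 * re-protects it PROT_NONE; like sbrk(), the previous break is what gets
 * returned to the caller.
 */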

const size_t kInitialMorecoreStart = SYSTEM_PAGE_SIZE;
/*
 * Add the initial heap.  Returns false if the initial heap was
 * already added to the heap source.
 */
static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
{
    assert(hs != NULL);
    assert(msp != NULL);
    if (hs->numHeaps != 0) {
        return false;
    }
    hs->heaps[0].msp = msp;
    hs->heaps[0].maximumSize = maximumSize;
    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
    hs->heaps[0].base = hs->heapBase;
    hs->heaps[0].limit = hs->heapBase + maximumSize;
    hs->heaps[0].brk = hs->heapBase + kInitialMorecoreStart;
    hs->numHeaps = 1;
    return true;
}

/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        ALOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    char *base = hs->heaps[0].brk;
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }
    size_t morecoreStart = SYSTEM_PAGE_SIZE;
    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.brk = heap.base + morecoreStart;
    heap.msp = createMspace(base, morecoreStart, HEAP_MIN_FREE);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace_set_footprint_limit(hs->heaps[0].msp, overhead);

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
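
/* After a successful addNewHeap() the old heap has been shifted to
 * heaps[1] with its limit pulled back to the old brk, so it can no longer
 * grow, and heaps[0] is a fresh mspace that starts at that same address.
 * ptr2heap() therefore keeps mapping existing objects to the frozen heap
 * while all new allocations come from heaps[0].
 */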

/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.  Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled.  Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}

static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}

/*
 * Create a stack big enough for the worst possible case, where the
 * heap is perfectly full of the smallest object.
 * TODO: be better about memory usage; use a smaller stack with
 *       overflow detection and recovery.
 */
static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
{
    const char *name = "dalvik-mark-stack";
    void *addr;

    assert(stack != NULL);
    stack->length = maximumSize * sizeof(Object*) /
        (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
    if (addr == NULL) {
        return false;
    }
    stack->base = (const Object **)addr;
    stack->limit = (const Object **)((char *)addr + stack->length);
    stack->top = NULL;
    madvise(stack->base, stack->length, MADV_DONTNEED);
    return true;
}
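
/* The sizing above assumes the degenerate heap of back-to-back minimum-size
 * objects: one Object* mark-stack slot per (sizeof(Object) +
 * HEAP_SOURCE_CHUNK_OVERHEAD) bytes of heap.  The madvise() keeps the
 * reservation from holding on to physical pages until the stack is
 * actually written.
 */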

static void freeMarkStack(GcMarkStack *stack)
{
    assert(stack != NULL);
    munmap(stack->base, stack->length);
    memset(stack, 0, sizeof(*stack));
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, kInitialMorecoreStart, startSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        free(gcHeap);
        goto fail;
    }

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}
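
/* A rough sketch of how the VM is expected to call this at startup (the
 * sizes below are illustrative, not the defaults):
 *
 *   GcHeap *gcHeap = dvmHeapSourceStartup(4 * 1024 * 1024,    // startSize
 *                                         24 * 1024 * 1024,   // maximumSize
 *                                         16 * 1024 * 1024);  // growthLimit
 *
 * startSize <= growthLimit <= maximumSize must hold or NULL is returned.
 */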

bool dvmHeapSourceStartupAfterZygote()
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        ALOGV("Splitting out new zygote heap");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs);
    }
    return true;
}

void dvmHeapSourceThreadShutdown()
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    assert(gcHeap != NULL);
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs = (*gcHeap)->heapSource;
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);
        freeMarkStack(&(*gcHeap)->markContext.stack);
        munmap(hs->heapBase, hs->heapLength);
        free(hs);
        gHs = NULL;
        free(*gcHeap);
        *gcHeap = NULL;
    }
}

/*
 * Gets the beginning of the allocation for the HeapSource.
 */
void *dvmHeapSourceGetBase()
{
    return gHs->heapBase;
}

/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_footprint_limit(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
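
/* Typical uses (with the heap lock held): the grand total only,
 *
 *   size_t used = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
 *
 * or per-heap figures as well, by passing an array of at least
 * dvmHeapSourceGetNumHeaps() elements for perHeapStats.
 */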

void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, size_t numHeaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(numHeaps <= hs->numHeaps);
    for (size_t i = 0; i < numHeaps; ++i) {
        base[i] = (uintptr_t)hs->heaps[i].base;
        max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}

void dvmHeapSourceSwapBitmaps()
{
    HeapBitmap tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}

void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    /*
     * Copy the contents of the live bit vector for immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (size_t i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the number of words to copy in the bitmap. */
            size_t index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            char *src = (char *)(gHs->liveBits.bits + index);
            char *dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            size_t length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    size_t max = heap->maximumSize;

    mspace_set_footprint_limit(heap->msp, max);
    void* ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_footprint_limit(heap->msp,
                               mspace_footprint(heap->msp));
    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    size_t oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage. The list must contain addresses all in
 * the same mspace, and must be in increasing order. This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    Heap* heap = ptr2heap(gHs, *ptrs);
    size_t numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // Count freed objects.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
            // Bulk free ptrs.
            mspace_bulk_free(msp, ptrs, numPtrs);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr));
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

bool dvmIsZygoteObject(const Object* obj)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    if (dvmHeapSourceContains(obj) && hs->sawZygote) {
        Heap *heap = ptr2heap(hs, obj);
        if (heap != NULL) {
            /* If the object is not in the active heap, we assume that
             * it was allocated as part of zygote.
             */
            return heap != hs->heaps;
        }
    }
    /* The pointer is outside of any known heap, or we are not
     * running in zygote mode.
     */
    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t dvmHeapSourceChunkSize(const void *ptr)
{
    HS_BOILERPLATE();

    Heap* heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

static size_t getMaximumSize(const HeapSource *hs)
{
    return hs->growthLimit;
}

/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();
    return getMaximumSize(gHs);
}

/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gDvm.gcHeap->cardTableLength = gDvm.gcHeap->cardTableMaxLength;
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
    dvmUnlockHeap();
}

/*
 * Return the real bytes used by old heaps plus the soft usage of the
 * current heap.  When a soft limit is in effect, this is effectively
 * what it's compared against (though, in practice, it only looks at
 * the current heap).
 */
static size_t getSoftFootprint(bool includeActive)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t ret = oldHeapOverhead(hs, false);
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_footprint_limit(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_footprint_limit(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}
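
/* Example: if the active mspace already has a 6MB footprint and the caller
 * asks for a 4MB limit, the mspace footprint limit stays pinned at the
 * current 6MB and softLimit becomes 4MB, so dvmHeapSourceAlloc() starts
 * failing once bytesAllocated + n would exceed 4MB.  A request at or above
 * the current footprint simply becomes the new footprint limit and clears
 * the soft limit (softLimit = SIZE_MAX).
 */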

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void setIdealFootprint(size_t max)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t maximumSize = getMaximumSize(hs);
    if (max > maximumSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(maximumSize));
        max = maximumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    ALOGV("Set heap target utilization to %zd/%d (%f)",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    size_t targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
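
/* Worked example with the default 512/1024 target: a 6MB live set yields a
 * raw target of (6MB / 512) * 1024 = 12MB, but that implies 6MB free, which
 * is over the HEAP_IDEAL_FREE cap, so the returned target is
 * 6MB + 2MB = 8MB.  A tiny 128KB live set would instead be bumped up to
 * 128KB + HEAP_MIN_FREE = 640KB.
 */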

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    size_t currentHeapUsed = heap->bytesAllocated;
    size_t targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    size_t overhead = getSoftFootprint(false);
    setIdealFootprint(targetHeapSize + overhead);

    size_t freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
}
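
/* Continuing the example above: with an 8MB allocation limit the
 * concurrent-start trigger ends up at 8MB - CONCURRENT_START = 8MB - 128KB,
 * and dvmHeapSourceAlloc() signals the GC thread once bytesAllocated crosses
 * that line.  If the new limit is below CONCURRENT_MIN_FREE the trigger is
 * disabled (SIZE_MAX) and the next collection will not be concurrent.
 */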

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void* start, void* end, size_t used_bytes,
                                void* releasedBytes)
{
    if (used_bytes == 0) {
        /*
         * We have a range of memory we can try to madvise()
         * back. Linux requires that the madvise() start address is
         * page-aligned.  We also align the end address.
         */
        start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
        end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
        if (end > start) {
            size_t length = (char *)end - (char *)start;
            madvise(start, length, MADV_DONTNEED);
            *(size_t *)releasedBytes += length;
        }
    }
}
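
/* Example, assuming 4KiB pages: a free chunk spanning 0x40000123..0x40003456
 * is trimmed to the page-aligned range 0x40001000..0x40003000, so exactly
 * two whole pages are madvised away and counted in *releasedBytes.
 */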

/*
 * Return unused memory to the system if possible.
 */
static void trimHeaps()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system. */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system. */
        mspace_inspect_all(heap->msp, releasePagesInRange, &heapBytes);
    }

    /* Same for the native heap. */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_inspect_all(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
                                       size_t used_bytes, void* arg),
                       void *arg)
{
    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    HeapSource *hs = gHs;
    for (size_t i = hs->numHeaps; i > 0; --i) {
        mspace_inspect_all(hs->heaps[i-1].msp, callback, arg);
        callback(NULL, NULL, 0, arg);  // Indicate end of a heap.
    }
}
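
/* A minimal sketch of a walker callback (hypothetical helper, not part of
 * this file): it only looks at used_bytes and ignores the end-of-heap
 * sentinel calls, which pass NULL/NULL/0.
 *
 *   static void sumUsed(void* start, void* end, size_t used_bytes, void* arg)
 *   {
 *       *(size_t *)arg += used_bytes;
 *   }
 *   ...
 *   size_t total = 0;
 *   dvmHeapSourceWalk(sumUsed, &total);
 */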

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t dvmHeapSourceGetNumHeaps()
{
    HS_BOILERPLATE();

    return gHs->numHeaps;
}

void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}
1377