/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/mspace.h>
#include <stdint.h>     // for SIZE_MAX
#include <sys/mman.h>
#include <errno.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"

// TODO: find a real header file for these.
extern int dlmalloc_trim(size_t);
extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint(void);
static void setIdealFootprint(size_t max);

#define ALIGN_UP_TO_PAGE_SIZE(p) \
    (((size_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & ~(SYSTEM_PAGE_SIZE - 1))
#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
    ((size_t)(p) & ~(SYSTEM_PAGE_SIZE - 1))
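
/* Example (illustrative, assuming SYSTEM_PAGE_SIZE == 4096):
 *
 *     ALIGN_UP_TO_PAGE_SIZE(4097)   == 8192
 *     ALIGN_UP_TO_PAGE_SIZE(8192)   == 8192   // already aligned
 *     ALIGN_DOWN_TO_PAGE_SIZE(8191) == 4096
 */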

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

#define DEBUG_HEAP_SOURCE 0
#if DEBUG_HEAP_SOURCE
#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
#else
#define HSTRACE(...)  /**/
#endif

/*
=======================================================
=======================================================
=======================================================

How will this be used?
allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
    - if allocating in large doesn't work, try allocating from small
Heap.c will use HeapSource.h; HeapSource.c will do the right thing
    between small and large
    - some operations should be abstracted; put in a structure

How do we manage the size trade-offs?
- keep mspace max footprint clamped to actual footprint
- if small-alloc returns null, adjust large vs. small ratio
    - give small all available slack and retry
    - success or fail, snap back to actual footprint and give rest to large

managed as "small actual" + "large actual" + "delta to allowed total footprint"
- when allocating from one source or the other, give the delta to the
    active source, but snap back afterwards
- that may not work so great for a gc heap, because small will always consume.
    - but we need to use the memory, and the current max is the amount we
      need to fill before a GC.

Find a way to permanently steal pages from the middle of the heap
    - segment tricks?

Allocate String and char[] in a separate heap?

Maybe avoid growing small heap, even if there's slack?  Look at
live ratio of small heap after a gc; scale it based on that.

=======================================================
=======================================================
=======================================================
*/

typedef struct {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t absoluteMaxSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;
} Heap;

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* Requested minimum heap size, or zero if there is no minimum.
     */
    size_t minimumSize;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t absoluteMaxSize;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* External allocation count.
     */
    size_t externalBytesAllocated;

    /* The maximum number of external bytes that may be allocated.
     */
    size_t externalLimit;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static inline bool
softLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t
getAllocLimit(const HeapSource *hs)
{
    if (softLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_max_allowed_footprint(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static inline size_t
oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static inline Heap *
ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;
    size_t i;

//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
    if (ptr != NULL) {
        for (i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr, bool isObj)
{
    HeapSource *hs;

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    if (isObj) {
        heap->objectsAllocated++;
        hs = gDvm.gcHeap->heapSource;
        dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);
    }

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    HeapSource *hs;
    size_t delta;

    delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;

static mspace
createMspace(void *base, size_t startSize, size_t absoluteMaxSize)
{
    mspace msp;

    /* Create an unlocked dlmalloc mspace to use as
     * a small-object heap source.
     *
     * We start off reserving startSize/2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %u\n", startSize);
    errno = 0;
    msp = create_contiguous_mspace_with_base(startSize/2,
            absoluteMaxSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%u,%u): %s\n",
            startSize/2, absoluteMaxSize, strerror(errno));
    }

    return msp;
}
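
/* Illustrative sketch of the reserve-then-clamp pattern used above; the
 * sizes here are hypothetical.  A 16MB reservation starts out allowed to
 * grow only to 4MB, and the cap can later be moved without remapping:
 *
 *     void *base = dvmAllocRegion(16 << 20, PROT_NONE, "example-heap");
 *     mspace msp = createMspace(base, 4 << 20, 16 << 20);
 *     mspace_set_max_allowed_footprint(msp, 8 << 20);
 */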

static bool
addNewHeap(HeapSource *hs, mspace msp, size_t mspAbsoluteMaxSize)
{
    Heap heap;

    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    if (msp != NULL) {
        heap.msp = msp;
        heap.absoluteMaxSize = mspAbsoluteMaxSize;
        heap.concurrentStartBytes = SIZE_MAX;
        heap.base = hs->heapBase;
        heap.limit = hs->heapBase + heap.absoluteMaxSize;
    } else {
        void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
        char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
        size_t overhead = base - hs->heaps[0].base;

        assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);
        if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
            LOGE_HEAP("No room to create any more heaps "
                    "(%zd overhead, %zd max)\n",
                    overhead, hs->absoluteMaxSize);
            return false;
        }
        hs->heaps[0].absoluteMaxSize = overhead;
        hs->heaps[0].limit = base;
        heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
        heap.msp = createMspace(base, HEAP_MIN_FREE, heap.absoluteMaxSize);
        heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
        heap.base = base;
        heap.limit = heap.base + heap.absoluteMaxSize;
        if (heap.msp == NULL) {
            return false;
        }
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    if (hs->numHeaps > 0) {
        mspace msp = hs->heaps[0].msp;
        mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
    }

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}

/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (!gHs->gcThreadShutdown) {
        dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        dvmLockHeap();
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmCollectGarbageInternal(false, GC_CONCURRENT);
        dvmChangeStatus(NULL, THREAD_VMWAIT);
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}

static bool gcDaemonStartup(void)
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

static void gcDaemonShutdown(void)
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap *
dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (startSize > absoluteMaxSize) {
        LOGE("Bad heap parameters (start=%zu, max=%zu)\n",
           startSize, absoluteMaxSize);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(absoluteMaxSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * the small object heap source.
     */
    msp = createMspace(base, startSize, absoluteMaxSize);
    if (msp == NULL) {
        goto fail;
    }

    /* Allocate a descriptor from the heap we just created.
     */
    gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor\n");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = mspace_malloc(msp, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source\n");
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->minimumSize = 0;
    hs->startSize = startSize;
    hs->absoluteMaxSize = absoluteMaxSize;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = base;
    hs->heapLength = length;
    if (!addNewHeap(hs, msp, absoluteMaxSize)) {
        LOGE_HEAP("Can't add initial heap\n");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits\n");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits\n");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }

    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    countAllocation(hs2heap(hs), gcHeap, false);
    countAllocation(hs2heap(hs), hs, false);

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}
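
/* Lifecycle sketch (illustrative; the VM's real boot code lives
 * elsewhere, and the sizes are hypothetical):
 *
 *     GcHeap *gcHeap = dvmHeapSourceStartup(2 << 20, 16 << 20);
 *     if (gcHeap == NULL)
 *         return;                          // VM init fails
 *     ...
 *     dvmHeapSourceStartupAfterZygote();   // may start the GC daemon
 *     ...
 *     dvmHeapSourceThreadShutdown();
 *     dvmHeapSourceShutdown(&gcHeap);      // also NULLs gcHeap and gHs
 */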

bool dvmHeapSourceStartupAfterZygote(void)
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool
dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGV("Splitting out new zygote heap\n");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs, NULL, 0);
    }
    return true;
}

void dvmHeapSourceThreadShutdown(void)
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void
dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs;

        hs = (*gcHeap)->heapSource;

        assert((char *)*gcHeap >= hs->heapBase);
        assert((char *)*gcHeap < hs->heapBase + hs->heapLength);

        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);

        munmap(hs->heapBase, hs->heapLength);
        gHs = NULL;
        *gcHeap = NULL;
    }
}

/*
 * Gets the beginning of the allocation for the HeapSource.
 */
void *dvmHeapSourceGetBase(void)
{
    return gHs->heapBase;
}

/*
 * Returns the requested value.  If the per-heap stats are requested,
 * fills them in as well.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[],
                      size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;
    size_t i;

    HS_BOILERPLATE();

    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        return hs->externalBytesAllocated;
    case HS_EXTERNAL_LIMIT:
        return hs->externalLimit;
    default:
        // look at all heaps.
        ;
    }

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
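
/* Example (illustrative): reading the total and per-heap allocated byte
 * counts while holding the heap lock.  The array name is hypothetical.
 *
 *     size_t perHeap[HEAP_SOURCE_MAX_HEAP_COUNT];
 *     size_t total = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, perHeap,
 *                                          NELEM(perHeap));
 *     // total is the sum of perHeap[0..numHeaps-1]
 */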

static void aliasBitmap(HeapBitmap *dst, HeapBitmap *src,
                        uintptr_t base, uintptr_t max) {
    size_t offset;

    dst->base = base;
    dst->max = max;
    dst->bitsLen = HB_OFFSET_TO_BYTE_INDEX(max - base) + sizeof(dst->bits);
    /* The exclusive limit from bitsLen is greater than the inclusive max. */
    assert(base + HB_MAX_OFFSET(dst) > max);
    /* The exclusive limit is at most one word of bits beyond max. */
    assert((base + HB_MAX_OFFSET(dst)) - max <=
           HB_OBJECT_ALIGNMENT * HB_BITS_PER_WORD);
    dst->allocLen = dst->bitsLen;
    offset = base - src->base;
    assert(HB_OFFSET_TO_MASK(offset) == 1u << 31);
    dst->bits = &src->bits[HB_OFFSET_TO_INDEX(offset)];
}

/*
 * Initializes a vector of object and mark bits to the object and mark
 * bits of each heap.  The bits are aliased to the heapsource
 * object and mark bitmaps.  This routine is used by the sweep code
 * which needs to free each object in the correct heap.
 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap liveBits[], HeapBitmap markBits[],
                                   size_t numHeaps)
{
    HeapSource *hs = gHs;
    uintptr_t base, max;
    size_t i;

    HS_BOILERPLATE();

    assert(numHeaps == hs->numHeaps);
    for (i = 0; i < hs->numHeaps; ++i) {
        base = (uintptr_t)hs->heaps[i].base;
        /* -1 because limit is exclusive but max is inclusive. */
        max = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
        aliasBitmap(&liveBits[i], &hs->liveBits, base, max);
        aliasBitmap(&markBits[i], &hs->markBits, base, max);
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits(void)
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

void dvmHeapSourceSwapBitmaps(void)
{
    HeapBitmap tmp;

    tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}

void dvmHeapSourceZeroMarkBitmap(void)
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    char *dst, *src;
    size_t i, index, length;

    /*
     * Copy the contents of the live bit vector for immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the number of words to copy in the bitmap. */
            index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            src = (char *)(gHs->liveBits.bits + index);
            dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void *
dvmHeapSourceAlloc(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;

    HS_BOILERPLATE();
    heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr, true);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
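
/* Illustrative caller (a simplified version of the retry logic that
 * lives in Heap.c; the variable names are hypothetical):
 *
 *     void *obj = dvmHeapSourceAlloc(size);
 *     if (obj == NULL) {
 *         // Normally a GC would run here first; growing is a last resort.
 *         obj = dvmHeapSourceAllocAndGrow(size);
 *     }
 */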

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void *
heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    void *ptr;
    size_t max;

    /* Grow as much as possible, but don't let the real footprint
     * plus external allocations go over the absolute max.
     */
    max = heap->absoluteMaxSize;
    if (max > hs->externalBytesAllocated) {
        max -= hs->externalBytesAllocated;

        mspace_set_max_allowed_footprint(heap->msp, max);
        ptr = dvmHeapSourceAlloc(n);

        /* Shrink back down as small as possible.  Our caller may
         * readjust max_allowed to a more appropriate value.
         */
        mspace_set_max_allowed_footprint(heap->msp,
                mspace_footprint(heap->msp));
    } else {
        ptr = NULL;
    }

    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void *
dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;
    size_t oldIdealSize;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    oldIdealSize = hs->idealSize;
    if (softLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage.  The list must contain addresses all in
 * the same mspace, and must be in increasing order.  This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    Heap *heap;
    size_t numBytes;

    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    heap = ptr2heap(gHs, *ptrs);
    numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration of the loop below: call
            // countFree() on ptrs[0] and initialize merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], &numBytes);
            void *merged = ptrs[0];

            size_t i;
            for (i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            size_t i;
            for (i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}
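
/* Example (illustrative; the pointers are hypothetical): the sweep code
 * hands this function a batch of dead objects from one mspace, sorted by
 * address.
 *
 *     void *dead[] = { objA, objB, objC };   // objA < objB < objC
 *     size_t reclaimed = dvmHeapSourceFreeList(NELEM(dead), dead);
 *     // Adjacent entries are merged first, so dlmalloc sees at most one
 *     // free() per run of contiguous objects.
 */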

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool
dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr);
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool
dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

/*
 * Returns the value of the requested flag.
 */
bool
dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    if (ptr == NULL) {
        return false;
    }

    if (flag == HS_CONTAINS) {
        return dvmHeapSourceContains(ptr);
    } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
        HeapSource *hs = gHs;

        HS_BOILERPLATE();

        if (hs->sawZygote) {
            Heap *heap;

            heap = ptr2heap(hs, ptr);
            if (heap != NULL) {
                /* If the object is not in the active heap, we assume that
                 * it was allocated as part of zygote.
                 */
                return heap != hs->heaps;
            }
        }
        /* The pointer is outside of any known heap, or we are not
         * running in zygote mode.
         */
        return false;
    }

    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t
dvmHeapSourceChunkSize(const void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}
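
/* Example (illustrative): dlmalloc rounds requests up to its chunk
 * granularity, so the usable size can exceed the requested size.
 *
 *     void *p = dvmHeapSourceAlloc(13);
 *     assert(dvmHeapSourceChunkSize(p) >= 13);   // e.g. rounded up to 16
 */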

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

/*
 * Return the real bytes used by old heaps and external memory
 * plus the soft usage of the current heap.  When a soft limit
 * is in effect, this is effectively what it's compared against
 * (though, in practice, it only looks at the current heap).
 */
static size_t
getSoftFootprint(bool includeActive)
{
    HeapSource *hs = gHs;
    size_t ret;

    HS_BOILERPLATE();

    ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated;
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t
dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void
setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void
setIdealFootprint(size_t max)
{
    HeapSource *hs = gHs;
#if DEBUG_HEAP_SOURCE
    HeapSource oldHs = *hs;
    mspace msp = hs->heaps[0].msp;
    size_t oldAllowedFootprint =
            mspace_max_allowed_footprint(msp);
#endif

    HS_BOILERPLATE();

    if (max > hs->absoluteMaxSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %zd.%03zdMB\n",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(hs->absoluteMaxSize));
        max = hs->absoluteMaxSize;
    } else if (max < hs->minimumSize) {
        max = hs->minimumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps and external allocations will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;

    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
            "ext %zd\n",
            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
            mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
            hs->externalBytesAllocated);
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void
snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)\n",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * If set is true, sets the new minimum heap size to size; always
 * returns the current (or previous) size.  If size is zero, removes
 * the current minimum constraint (if present); size is unsigned, so
 * it can never be negative.
 */
size_t
dvmMinimumHeapSize(size_t size, bool set)
{
    HeapSource *hs = gHs;
    size_t oldMinimumSize;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.  We also need the
     * lock for the call to setIdealFootprint().
     */
    dvmLockHeap();

    HS_BOILERPLATE();

    oldMinimumSize = hs->minimumSize;

    if (set) {
        /* Don't worry about external allocations right now.
         * setIdealFootprint() will take them into account when
         * minimumSize is used, and it's better to hold onto the
         * intended minimumSize than to clamp it arbitrarily based
         * on the current allocations.
         */
        if (size > hs->absoluteMaxSize) {
            size = hs->absoluteMaxSize;
        }
        hs->minimumSize = size;
        if (size > hs->idealSize) {
            /* Force a snap to the minimum value, which we just set
             * and which setIdealFootprint() will take into consideration.
             */
            setIdealFootprint(hs->idealSize);
        }
        /* Otherwise we'll just keep it in mind the next time
         * setIdealFootprint() is called.
         */
    }

    dvmUnlockHeap();

    return oldMinimumSize;
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t
getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    size_t targetSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
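
/* Worked example (illustrative): with the default targetUtilization of
 * 512 (i.e. 50%), a 3MB live set gives
 *
 *     targetSize = (3MB / 512) * 1024 = 6MB
 *
 * but 3MB of free space exceeds HEAP_IDEAL_FREE (2MB), so the result is
 * clamped to 3MB + 2MB = 5MB.  A very small live set is instead padded
 * up to liveSize + HEAP_MIN_FREE (512KB).
 */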

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HeapSource *hs = gHs;
    Heap *heap;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;
    size_t newHeapMax;
    size_t overhead;
    size_t freeBytes;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* This is a hack to deal with the side-effects of moving
     * bitmap data out of the Dalvik heap.  Since the amount
     * of free space after a GC scales with the size of the
     * live set, many apps expected the large free space that
     * appeared along with megabytes' worth of bitmaps.  When
     * the bitmaps were removed, the free size shrank significantly,
     * and apps started GCing constantly.  This makes it so the
     * post-GC free space is the same size it would have been
     * if the bitmaps were still in the Dalvik heap.
     */
    currentHeapUsed += hs->externalBytesAllocated;
#endif
    targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    currentHeapUsed -= hs->externalBytesAllocated;
    targetHeapSize -= hs->externalBytesAllocated;
#endif

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

    freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (softLimited(hs)) {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated),
                FRACTIONAL_MB(newHeapMax));
    } else {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated));
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */

static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)ALIGN_DOWN_TO_PAGE_SIZE(end);
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}

/*
 * Return unused memory to the system if possible.
 */
void
dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t nativeBytes, heapBytes;
    size_t i;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps);

    heapBytes = 0;
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void
dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                      const void *userptr, size_t userlen,
                                      void *arg),
                  void *arg)
{
    HeapSource *hs = gHs;
    size_t i;

    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    for (i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}
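
/* Example (illustrative): a callback that tallies bytes in use, assuming
 * the walker passes a NULL userptr for free chunks.  countChunk and
 * total are hypothetical names.
 *
 *     static void countChunk(const void *chunkptr, size_t chunklen,
 *                            const void *userptr, size_t userlen,
 *                            void *arg)
 *     {
 *         if (userptr != NULL) {
 *             *(size_t *)arg += userlen;
 *         }
 *     }
 *     ...
 *     size_t total = 0;
 *     dvmHeapSourceWalk(countChunk, &total);
 */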

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t
dvmHeapSourceGetNumHeaps()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->numHeaps;
}


/*
 * External allocation tracking
 *
 * In some situations, memory outside of the heap is tied to the
 * lifetime of objects in the heap.  Since that memory is kept alive
 * by heap objects, it should provide memory pressure that can influence
 * GCs.
 */

/*
 * Returns true if the requested number of bytes can be allocated from
 * available storage.
 */
static bool externalBytesAvailable(const HeapSource *hs, size_t numBytes)
{
    const Heap *heap;
    size_t currentHeapSize, newHeapSize;

    /* Make sure that this allocation is even possible.
     * Don't let the external size plus the actual heap size
     * go over the absolute max.  This essentially treats
     * external allocations as part of the active heap.
     *
     * Note that this will fail "mysteriously" if there's
     * a small softLimit but a large heap footprint.
     */
    heap = hs2heap(hs);
    currentHeapSize = mspace_max_allowed_footprint(heap->msp);
    newHeapSize = currentHeapSize + hs->externalBytesAllocated + numBytes;
    if (newHeapSize <= heap->absoluteMaxSize) {
        return true;
    }
    HSTRACE("externalBytesAvailable(): "
            "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n",
            currentHeapSize, hs->externalBytesAllocated, numBytes,
            heap->absoluteMaxSize,
            heap->absoluteMaxSize -
                    (currentHeapSize + hs->externalBytesAllocated));
    return false;
}

#define EXTERNAL_TARGET_UTILIZATION 820  // 80%

/*
 * Tries to update the internal count of externally-allocated memory.
 * If there's enough room for that memory, returns true.  If not, returns
 * false and does not update the count.
 *
 * The caller must ensure externalBytesAvailable(hs, n) == true.
 */
static bool
externalAlloc(HeapSource *hs, size_t n, bool grow)
{
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : "");
    assert(externalBytesAvailable(hs, n));  // The caller must ensure this.

    /* External allocations have their own "free space" that they
     * can allocate from without causing a GC.
     */
    if (hs->externalBytesAllocated + n <= hs->externalLimit) {
        hs->externalBytesAllocated += n;
#if PROFILE_EXTERNAL_ALLOCATIONS
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.externalAllocCount++;
            gDvm.allocProf.externalAllocSize += n;
            if (self != NULL) {
                self->allocProf.externalAllocCount++;
                self->allocProf.externalAllocSize += n;
            }
        }
#endif
        return true;
    }
    if (!grow) {
        return false;
    }

    /* GROW */
    hs->externalBytesAllocated += n;
    hs->externalLimit = getUtilizationTarget(
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit);
    return true;
}

static void
gcForExternalAlloc(bool collectSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXTERNAL_ALLOC);
}

/*
 * Returns true if there is enough unused storage to perform an
 * external allocation of the specified size.  If there is insufficient
 * free storage, we try to release memory by collecting garbage and
 * trimming the heap.
 */
static bool externalAllocPossible(const HeapSource *hs, size_t n)
{
    size_t bytesTrimmed[HEAP_SOURCE_MAX_HEAP_COUNT];

    /*
     * If there is sufficient space return immediately.
     */
    if (externalBytesAvailable(hs, n)) {
        return true;
    }
    /*
     * There is insufficient space.  Wait for the garbage collector to
     * become inactive before proceeding.
     */
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitForConcurrentGcToComplete();
    }
    /*
     * The heap may have grown or become trimmed while we were
     * waiting.
     */
    if (externalBytesAvailable(hs, n)) {
        return true;
    }
    /*
     * Try a garbage collection that clears soft references.  This may
     * make trimming more effective.
     */
    gcForExternalAlloc(true);
    if (externalBytesAvailable(hs, n)) {
        return true;
    }
    /*
     * Try trimming the mspace to reclaim unused pages.
     */
    dvmHeapSourceTrim(bytesTrimmed, NELEM(bytesTrimmed));
    snapIdealFootprint();
    if (externalBytesAvailable(hs, n)) {
        return true;
    }
    /*
     * Nothing worked, return an error.
     */
    return false;
}

/*
 * Updates the internal count of externally-allocated memory.  If there's
 * enough room for that memory, returns true.  If not, returns false and
 * does not update the count.
 *
 * May cause a GC as a side-effect.
 */
bool
dvmTrackExternalAllocation(size_t n)
{
    HeapSource *hs = gHs;
    bool ret = false;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    /*
     * The externalAlloc calls require the externalAllocPossible
     * invariant to be established.
     */
    if (!externalAllocPossible(hs, n)) {
        LOGE_HEAP("%zd-byte external allocation "
                  "too large for this process.", n);
        goto out;
    }

    /* Try "allocating" using the existing "free space".
     */
    HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }
    /*
     * Wait until garbage collector is quiescent before proceeding.
     */
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitForConcurrentGcToComplete();
    }
    /*
     * Re-establish the invariant if it was lost while we were
     * waiting.
     */
    if (!externalAllocPossible(hs, n)) {
        LOGE_HEAP("%zd-byte external allocation "
                  "too large for this process.", n);
        goto out;
    }
    /* The "allocation" failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap source
     * if the live set is sufficiently large.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 1\n", n);
    gcForExternalAlloc(false);  // don't collect SoftReferences
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap source if necessary.
     */
    HSTRACE("EXTERNAL alloc %zd: frag\n", n);
    ret = externalAlloc(hs, n, true);
    if (ret) {
        goto out;
    }

    /* We couldn't even grow enough to satisfy the request.
     * Try one last GC, collecting SoftReferences this time.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 2\n", n);
    gcForExternalAlloc(true);  // collect SoftReferences
    ret = externalAlloc(hs, n, true);
    if (!ret) {
        LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n);
    }

#if PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.failedExternalAllocCount++;
        gDvm.allocProf.failedExternalAllocSize += n;
        if (self != NULL) {
            self->allocProf.failedExternalAllocCount++;
            self->allocProf.failedExternalAllocSize += n;
        }
    }
#endif

out:
    dvmUnlockHeap();

    return ret;
}
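
/* Example (illustrative): how a native peer, e.g. a bitmap backed by
 * native memory, might pair these calls around the lifetime of its Java
 * object.  The names are hypothetical.
 *
 *     if (!dvmTrackExternalAllocation(byteCount)) {
 *         return NULL;                    // report OutOfMemoryError
 *     }
 *     void *storage = malloc(byteCount);  // the real native allocation
 *     ...
 *     free(storage);                      // in the teardown path
 *     dvmTrackExternalFree(byteCount);
 */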

/*
 * Reduces the internal count of externally-allocated memory.
 */
void
dvmTrackExternalFree(size_t n)
{
    HeapSource *hs = gHs;
    size_t newExternalLimit;
    size_t oldExternalBytesAllocated;

    HSTRACE("EXTERNAL free %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    oldExternalBytesAllocated = hs->externalBytesAllocated;
    if (n <= hs->externalBytesAllocated) {
        hs->externalBytesAllocated -= n;
    } else {
        n = hs->externalBytesAllocated;
        hs->externalBytesAllocated = 0;
    }

#if PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.externalFreeCount++;
        gDvm.allocProf.externalFreeSize += n;
        if (self != NULL) {
            self->allocProf.externalFreeCount++;
            self->allocProf.externalFreeSize += n;
        }
    }
#endif

    /* Shrink as quickly as we can.
     */
    newExternalLimit = getUtilizationTarget(
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    if (newExternalLimit < oldExternalBytesAllocated) {
        /* Make sure that the remaining free space is at least
         * big enough to allocate something of the size that was
         * just freed.  This makes it more likely that
         *     externalFree(N); externalAlloc(N);
         * will work without causing a GC.
         */
        HSTRACE("EXTERNAL free preserved %zu extra free bytes\n",
                oldExternalBytesAllocated - newExternalLimit);
        newExternalLimit = oldExternalBytesAllocated;
    }
    if (newExternalLimit < hs->externalLimit) {
        hs->externalLimit = newExternalLimit;
    }

    dvmUnlockHeap();
}

/*
 * Returns the number of externally-allocated bytes being tracked by
 * dvmTrackExternalAllocation/Free().
 */
size_t
dvmGetExternalBytesAllocated()
{
    const HeapSource *hs = gHs;
    size_t ret;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();
    HS_BOILERPLATE();
    ret = hs->externalBytesAllocated;
    dvmUnlockHeap();

    return ret;
}

void *dvmHeapSourceGetImmuneLimit(GcMode mode)
{
    if (mode == GC_PARTIAL) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}