/* HeapSource.c -- revision c5f53e2c1107e8a62638038bbff163731908da34 */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/ashmem.h>
#include <cutils/mspace.h>
#include <limits.h>     // for INT_MAX
#include <sys/mman.h>
#include <errno.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"

// TODO: find a real header file for these.
extern int dlmalloc_trim(size_t);
extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint(void);
static void setIdealFootprint(size_t max);

#define ALIGN_UP_TO_PAGE_SIZE(p) \
    (((size_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & ~(SYSTEM_PAGE_SIZE - 1))
#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
    ((size_t)(p) & ~(SYSTEM_PAGE_SIZE - 1))

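/* For example, with SYSTEM_PAGE_SIZE == 4096:
 *     ALIGN_UP_TO_PAGE_SIZE(5000)   == 8192
 *     ALIGN_UP_TO_PAGE_SIZE(4096)   == 4096
 *     ALIGN_DOWN_TO_PAGE_SIZE(5000) == 4096
 * Both macros assume SYSTEM_PAGE_SIZE is a power of two.
 */
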
#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)

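/* Utilization is expressed in fixed point as a fraction of
 * HEAP_UTILIZATION_MAX; e.g., DEFAULT_HEAP_UTILIZATION == 512 means a
 * target of 512/1024, i.e. 50% of the heap filled by live data after a GC.
 */
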
#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

#define DEBUG_HEAP_SOURCE 0
#if DEBUG_HEAP_SOURCE
#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
#else
#define HSTRACE(...)  /**/
#endif

/*
=======================================================
=======================================================
=======================================================

How will this be used?
allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
    - if allocating in large doesn't work, try allocating from small
Heap.c will use HeapSource.h; HeapSource.c will do the right thing
    between small and large
    - some operations should be abstracted; put in a structure

How do we manage the size trade-offs?
- keep mspace max footprint clamped to actual footprint
- if small-alloc returns null, adjust large vs. small ratio
    - give small all available slack and retry
    - success or fail, snap back to actual footprint and give rest to large

managed as "small actual" + "large actual" + "delta to allowed total footprint"
- when allocating from one source or the other, give the delta to the
    active source, but snap back afterwards
- that may not work so great for a gc heap, because small will always consume.
    - but we need to use the memory, and the current max is the amount we
      need to fill before a GC.

Find a way to permanently steal pages from the middle of the heap
    - segment tricks?

Allocate String and char[] in a separate heap?

Maybe avoid growing small heap, even if there's slack?  Look at
live ratio of small heap after a gc; scale it based on that.

=======================================================
=======================================================
=======================================================
*/

typedef struct {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t absoluteMaxSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;
} Heap;

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* Requested minimum heap size, or zero if there is no minimum.
     */
    size_t minimumSize;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t absoluteMaxSize;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* External allocation count.
     */
    size_t externalBytesAllocated;

    /* The maximum number of external bytes that may be allocated.
     */
    size_t externalLimit;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static inline bool
softLimited(const HeapSource *hs)
{
    /* softLimit will be either INT_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-INT_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static inline size_t
oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static inline Heap *
ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;
    size_t i;

//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
    if (ptr != NULL) {
        for (i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static inline void
countAllocation(Heap *heap, const void *ptr, bool isObj)
{
    HeapSource *hs;

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    if (isObj) {
        heap->objectsAllocated++;
        hs = gDvm.gcHeap->heapSource;
        dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);
    }

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static inline void
countFree(Heap *heap, const void *ptr, bool isObj)
{
    HeapSource *hs;
    size_t delta;

    delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    if (isObj) {
        hs = gDvm.gcHeap->heapSource;
        dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
        if (heap->objectsAllocated > 0) {
            heap->objectsAllocated--;
        }
    }
}

static HeapSource *gHs = NULL;

static mspace
createMspace(void *base, size_t startSize, size_t absoluteMaxSize)
{
    mspace msp;

    /* Create an unlocked dlmalloc mspace to use as
     * a small-object heap source.
     *
     * We start off reserving heapSizeStart/2 bytes but
     * letting the heap grow to heapSizeStart.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zd\n", startSize);
    errno = 0;
    msp = create_contiguous_mspace_with_base(startSize/2,
            absoluteMaxSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zd,%zd): %s\n",
            startSize/2, absoluteMaxSize, strerror(errno));
    }

    return msp;
}

static bool
addNewHeap(HeapSource *hs, mspace msp, size_t mspAbsoluteMaxSize)
{
    Heap heap;
    void *base;

    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    if (msp != NULL) {
        heap.msp = msp;
        heap.absoluteMaxSize = mspAbsoluteMaxSize;
        heap.base = hs->heapBase;
        heap.limit = hs->heapBase + heap.absoluteMaxSize;
    } else {
        size_t overhead;

        overhead = ALIGN_UP_TO_PAGE_SIZE(oldHeapOverhead(hs, true));
        if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
            LOGE_HEAP("No room to create any more heaps "
                    "(%zd overhead, %zd max)\n",
                    overhead, hs->absoluteMaxSize);
            return false;
        }
        hs->heaps[0].absoluteMaxSize = overhead;
        heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
        base = contiguous_mspace_sbrk0(hs->heaps[0].msp);
        hs->heaps[0].limit = base;
        base = (void *)ALIGN_UP_TO_PAGE_SIZE(base);
        heap.msp = createMspace(base, HEAP_MIN_FREE, heap.absoluteMaxSize);
        heap.base = base;
        heap.limit = heap.base + heap.absoluteMaxSize;
        if (heap.msp == NULL) {
            return false;
        }
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    if (hs->numHeaps > 0) {
        mspace msp = hs->heaps[0].msp;
        mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
    }

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}

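/* Illustrative layout (not to scale) after a zygote split performed by
 * addNewHeap(hs, NULL, 0): the old heap is capped at its current break
 * and demoted to heaps[1], and a new active heap is created at the next
 * page boundary within the original reservation.
 *
 *   heapBase                                       heapBase + heapLength
 *   |---- heaps[1] (old, frozen) ----|---- heaps[0] (active) ----~~~----|
 *                                    ^
 *                                    page-aligned old-heap break (sbrk0)
 */
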
/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap *
dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;
    int fd, ret;

    assert(gHs == NULL);

    if (startSize > absoluteMaxSize) {
        LOGE("Bad heap parameters (start=%zd, max=%zd)\n",
           startSize, absoluteMaxSize);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(absoluteMaxSize);
    fd = ashmem_create_region("dalvik-heap", length);
    if (fd == -1) {
        return NULL;
    }
    base = mmap(NULL, length, PROT_NONE, MAP_PRIVATE, fd, 0);
    if (base == MAP_FAILED) {
        close(fd);  // don't leak the ashmem fd when the mapping fails
        return NULL;
    }
    ret = close(fd);
    if (ret == -1) {
        goto fail;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * the small object heap source.
     */
    msp = createMspace(base, startSize, absoluteMaxSize);
    if (msp == NULL) {
        goto fail;
    }

    /* Allocate a descriptor from the heap we just created.
     */
    gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor\n");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = mspace_malloc(msp, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source\n");
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->minimumSize = 0;
    hs->startSize = startSize;
    hs->absoluteMaxSize = absoluteMaxSize;
    hs->idealSize = startSize;
    hs->softLimit = INT_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->heapBase = base;
    hs->heapLength = length;
    if (!addNewHeap(hs, msp, absoluteMaxSize)) {
        LOGE_HEAP("Can't add initial heap\n");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits\n");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits\n");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }

    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    countAllocation(hs2heap(hs), gcHeap, false);
    countAllocation(hs2heap(hs), hs, false);

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool
dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGV("Splitting out new zygote heap\n");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs, NULL, 0);
    }
    return true;
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void
dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs;

        hs = (*gcHeap)->heapSource;

        assert((char *)*gcHeap >= hs->heapBase);
        assert((char *)*gcHeap < hs->heapBase + hs->heapLength);

        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);

        munmap(hs->heapBase, hs->heapLength);
        gHs = NULL;
        *gcHeap = NULL;
    }
}

/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[],
                      size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;
    size_t i;

    HS_BOILERPLATE();

    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        return hs->externalBytesAllocated;
    case HS_EXTERNAL_LIMIT:
        return hs->externalLimit;
    default:
        // look at all heaps.
        ;
    }

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}

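/* Points <dst> at the subrange of <src> covering [base, max].  No bits
 * are copied; <dst>->bits aliases <src>'s storage, so <dst> must not be
 * deleted independently.
 */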
static void aliasBitmap(HeapBitmap *dst, HeapBitmap *src,
                        uintptr_t base, uintptr_t max) {
    size_t offset;

    dst->base = base;
    dst->max = max;
    dst->bitsLen = HB_OFFSET_TO_BYTE_INDEX(max - base);
    dst->allocLen = dst->bitsLen;
    offset = base - src->base;
    assert(HB_OFFSET_TO_MASK(offset) == 1U << 31);
    dst->bits = &src->bits[HB_OFFSET_TO_INDEX(offset)];
}

/*
 * Initializes a vector of object and mark bits to the object and mark
 * bits of each heap.  The bits are aliased to the heapsource
 * object and mark bitmaps.  This routine is used by the sweep code
 * which needs to free each object in the correct heap.
 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap liveBits[], HeapBitmap markBits[],
                                   size_t numHeaps)
{
    HeapSource *hs = gHs;
    uintptr_t base, max;
    size_t i;

    HS_BOILERPLATE();

    assert(numHeaps == hs->numHeaps);
    for (i = 0; i < hs->numHeaps; ++i) {
        base = (uintptr_t)hs->heaps[i].base;
        max = (uintptr_t)hs->heaps[i].limit - 1;
        aliasBitmap(&liveBits[i], &hs->liveBits, base, max);
        aliasBitmap(&markBits[i], &hs->markBits, base, max);
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

void dvmHeapSourceSwapBitmaps(void)
{
    HeapBitmap tmp;
    tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    char *dst, *src;
    size_t i, index, length;

    /*
     * Copy the contents of the live bit vector for immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            LOGV("Copying markBits for immune heap %zd", i);
            /* Compute the number of words to copy in the bitmap. */
            index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            src = (char *)(gHs->liveBits.bits + index);
            dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void *
dvmHeapSourceAlloc(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    if (heap->bytesAllocated + n <= hs->softLimit) {
// TODO: allocate large blocks (>64k?) as separate mmap regions so that
//       they don't increase the high-water mark when they're freed.
// TODO: zero out large objects using madvise
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr != NULL) {
            countAllocation(heap, ptr, true);
        }
    } else {
        /* This allocation would push us over the soft limit;
         * act as if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
                FRACTIONAL_MB(hs->softLimit), n);
        ptr = NULL;
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void *
heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    void *ptr;
    size_t max;

    /* Grow as much as possible, but don't let the real footprint
     * plus external allocations go over the absolute max.
     */
    max = heap->absoluteMaxSize;
    if (max > hs->externalBytesAllocated) {
        max -= hs->externalBytesAllocated;

        mspace_set_max_allowed_footprint(heap->msp, max);
        ptr = dvmHeapSourceAlloc(n);

        /* Shrink back down as small as possible.  Our caller may
         * readjust max_allowed to a more appropriate value.
         */
        mspace_set_max_allowed_footprint(heap->msp,
                mspace_footprint(heap->msp));
    } else {
        ptr = NULL;
    }

    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void *
dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;
    size_t oldIdealSize;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    oldIdealSize = hs->idealSize;
    if (softLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = INT_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at INT_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

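/* For reference, a caller would typically escalate through these entry
 * points in stages; a rough sketch (gc() here stands in for the real
 * collector entry point, e.g. dvmCollectGarbageInternal()):
 *
 *     void *obj = dvmHeapSourceAlloc(n);       // fast path
 *     if (obj == NULL) {
 *         gc();                                // reclaim space, then retry
 *         obj = dvmHeapSourceAlloc(n);
 *     }
 *     if (obj == NULL) {
 *         obj = dvmHeapSourceAllocAndGrow(n);  // last resort: grow the heap
 *     }
 */
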
/*
 * Frees the memory pointed to by <ptr>, which may be NULL.
 */
void
dvmHeapSourceFree(void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        countFree(heap, ptr, true);
        /* Only free objects that are in the active heap.
         * Touching old heaps would pull pages into this process.
         */
        if (heap == gHs->heaps) {
            mspace_free(heap->msp, ptr);
        }
    }
}

/*
 * Frees the first numPtrs objects in the ptrs list. The list must
 * contain addresses all in the same mspace, and must be in increasing
 * order. This implies that there are no duplicates, and no entries
 * are NULL.
 */
void
dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    Heap *heap;

    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    heap = ptr2heap(gHs, *ptrs);
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration of the loop below: count ptrs[0]
            // as freed and initialize merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], true);
            void *merged = ptrs[0];

            size_t i;
            for (i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], true);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            size_t i;
            for (i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], true);
            }
        }
    }
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool
dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

/*
 * Returns the value of the requested flag.
 */
bool
dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    if (ptr == NULL) {
        return false;
    }

    if (flag == HS_CONTAINS) {
        return dvmHeapSourceContains(ptr);
    } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
        HeapSource *hs = gHs;

        HS_BOILERPLATE();

        if (hs->sawZygote) {
            Heap *heap;

            heap = ptr2heap(hs, ptr);
            if (heap != NULL) {
                /* If the object is not in the active heap, we assume that
                 * it was allocated as part of zygote.
                 */
                return heap != hs->heaps;
            }
        }
        /* The pointer is outside of any known heap, or we are not
         * running in zygote mode.
         */
        return false;
    }

    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t
dvmHeapSourceChunkSize(const void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

/*
 * Return the real bytes used by old heaps and external memory
 * plus the soft usage of the current heap.  When a soft limit
 * is in effect, this is effectively what it's compared against
 * (though, in practice, it only looks at the current heap).
 */
static size_t
getSoftFootprint(bool includeActive)
{
    HeapSource *hs = gHs;
    size_t ret;

    HS_BOILERPLATE();

    ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated;
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t
dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void
setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = INT_MAX;
    }
}

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void
setIdealFootprint(size_t max)
{
    HeapSource *hs = gHs;
#if DEBUG_HEAP_SOURCE
    HeapSource oldHs = *hs;
    mspace msp = hs->heaps[0].msp;
    size_t oldAllowedFootprint =
            mspace_max_allowed_footprint(msp);
#endif

    HS_BOILERPLATE();

    if (max > hs->absoluteMaxSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %zd.%03zdMB\n",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(hs->absoluteMaxSize));
        max = hs->absoluteMaxSize;
    } else if (max < hs->minimumSize) {
        max = hs->minimumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps and external allocations will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;

    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
            "ext %zd\n",
            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
            mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
            hs->externalBytesAllocated);

}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void
snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)\n",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

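/* For example, dvmSetTargetHeapUtilization(0.5f) stores
 * (size_t)(0.5 * 1024) == 512 in targetUtilization, asking the collector
 * to keep the live set at about half of the active heap.
 */
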
/*
 * If set is true, sets the new minimum heap size to size; always
 * returns the current (or previous) size.  If size is zero,
 * removes the current minimum constraint (if present).
 */
size_t
dvmMinimumHeapSize(size_t size, bool set)
{
    HeapSource *hs = gHs;
    size_t oldMinimumSize;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.  We also need the
     * lock for the call to setIdealFootprint().
     */
    dvmLockHeap();

    HS_BOILERPLATE();

    oldMinimumSize = hs->minimumSize;

    if (set) {
        /* Don't worry about external allocations right now.
         * setIdealFootprint() will take them into account when
         * minimumSize is used, and it's better to hold onto the
         * intended minimumSize than to clamp it arbitrarily based
         * on the current allocations.
         */
        if (size > hs->absoluteMaxSize) {
            size = hs->absoluteMaxSize;
        }
        hs->minimumSize = size;
        if (size > hs->idealSize) {
            /* Force a snap to the minimum value, which we just set
             * and which setIdealFootprint() will take into consideration.
             */
            setIdealFootprint(hs->idealSize);
        }
        /* Otherwise we'll just keep it in mind the next time
         * setIdealFootprint() is called.
         */
    }

    dvmUnlockHeap();

    return oldMinimumSize;
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t
getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    size_t targetSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}

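/* Worked example, using the default targetUtilization of 512 (50%):
 * for liveSize == 4MB, (4MB / 512) * 1024 == 8MB, but free space is
 * capped at liveSize + HEAP_IDEAL_FREE, so the result is 4MB + 2MB == 6MB.
 * A very small live set is instead padded up to liveSize + HEAP_MIN_FREE
 * (512KB of free space).
 */
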
/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HeapSource *hs = gHs;
    Heap *heap;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;
    size_t newHeapMax;
    size_t overhead;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* This is a hack to deal with the side-effects of moving
     * bitmap data out of the Dalvik heap.  Since the amount
     * of free space after a GC scales with the size of the
     * live set, many apps expected the large free space that
     * appeared along with megabytes' worth of bitmaps.  When
     * the bitmaps were removed, the free size shrank significantly,
     * and apps started GCing constantly.  This makes it so the
     * post-GC free space is the same size it would have been
     * if the bitmaps were still in the Dalvik heap.
     */
    currentHeapUsed += hs->externalBytesAllocated;
#endif
    targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    currentHeapUsed -= hs->externalBytesAllocated;
    targetHeapSize -= hs->externalBytesAllocated;
#endif

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (softLimited(hs)) {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated),
                FRACTIONAL_MB(newHeapMax));
    } else {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated));
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */

static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)ALIGN_DOWN_TO_PAGE_SIZE(end);
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}

/*
 * Return unused memory to the system if possible.
 */
void
dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t nativeBytes, heapBytes;
    size_t i;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps);

    heapBytes = 0;
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void
dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                      const void *userptr, size_t userlen,
                                      void *arg),
                  void *arg)
{
    HeapSource *hs = gHs;
    size_t i;

    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    for (i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

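/* A minimal sketch of a walk callback (names here are hypothetical):
 *
 *     static void countChunk(const void *chunkptr, size_t chunklen,
 *                            const void *userptr, size_t userlen, void *arg)
 *     {
 *         *(size_t *)arg += chunklen;   // userptr/userlen describe user data
 *     }
 *
 *     size_t totalBytes = 0;
 *     dvmHeapSourceWalk(countChunk, &totalBytes);
 */
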
/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t
dvmHeapSourceGetNumHeaps()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->numHeaps;
}

/*
 * External allocation tracking
 *
 * In some situations, memory outside of the heap is tied to the
 * lifetime of objects in the heap.  Since that memory is kept alive
 * by heap objects, it should provide memory pressure that can influence
 * GCs.
 */

static bool
externalAllocPossible(const HeapSource *hs, size_t n)
{
    const Heap *heap;
    size_t currentHeapSize;

    /* Make sure that this allocation is even possible.
     * Don't let the external size plus the actual heap size
     * go over the absolute max.  This essentially treats
     * external allocations as part of the active heap.
     *
     * Note that this will fail "mysteriously" if there's
     * a small softLimit but a large heap footprint.
     */
    heap = hs2heap(hs);
    currentHeapSize = mspace_max_allowed_footprint(heap->msp);
    if (currentHeapSize + hs->externalBytesAllocated + n <=
            heap->absoluteMaxSize)
    {
        return true;
    }
    HSTRACE("externalAllocPossible(): "
            "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n",
            currentHeapSize, hs->externalBytesAllocated, n,
            heap->absoluteMaxSize,
            heap->absoluteMaxSize -
                    (currentHeapSize + hs->externalBytesAllocated));
    return false;
}

#define EXTERNAL_TARGET_UTILIZATION 820  // 80%

/*
 * Tries to update the internal count of externally-allocated memory.
 * If there's enough room for that memory, returns true.  If not, returns
 * false and does not update the count.
 *
 * The caller must ensure externalAllocPossible(hs, n) == true.
 */
static bool
externalAlloc(HeapSource *hs, size_t n, bool grow)
{
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : "");
    assert(externalAllocPossible(hs, n));  // The caller must ensure this.

    /* External allocations have their own "free space" that they
     * can allocate from without causing a GC.
     */
    if (hs->externalBytesAllocated + n <= hs->externalLimit) {
        hs->externalBytesAllocated += n;
#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.externalAllocCount++;
            gDvm.allocProf.externalAllocSize += n;
            if (self != NULL) {
                self->allocProf.externalAllocCount++;
                self->allocProf.externalAllocSize += n;
            }
        }
#endif
        return true;
    }
    if (!grow) {
        return false;
    }

    /* GROW */
    hs->externalBytesAllocated += n;
    hs->externalLimit = getUtilizationTarget(
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit);
    return true;
}

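/* Worked example of the GROW case: with EXTERNAL_TARGET_UTILIZATION of
 * 820, the new limit is roughly allocated * 1024/820, about 1.25x the
 * allocated bytes, subject to the HEAP_MIN_FREE/HEAP_IDEAL_FREE clamps
 * in getUtilizationTarget().  E.g., 8MB allocated gives a limit of
 * (8MB / 820) * 1024 ~= 10MB, leaving ~2MB of external headroom.
 */
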
static void
gcForExternalAlloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER  // even if !PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXTERNAL_ALLOC);
}

/*
 * Updates the internal count of externally-allocated memory.  If there's
 * enough room for that memory, returns true.  If not, returns false and
 * does not update the count.
 *
 * May cause a GC as a side-effect.
 */
bool
dvmTrackExternalAllocation(size_t n)
{
    HeapSource *hs = gHs;
    bool ret = false;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    if (!externalAllocPossible(hs, n)) {
        LOGE_HEAP("%zd-byte external allocation "
                "too large for this process.\n", n);
        goto out;
    }

    /* Try "allocating" using the existing "free space".
     */
    HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* The "allocation" failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap source
     * if the live set is sufficiently large.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 1\n", n);
    gcForExternalAlloc(false);  // don't collect SoftReferences
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap source if necessary.
     */
    HSTRACE("EXTERNAL alloc %zd: frag\n", n);
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (ret) {
        goto out;
    }

    /* We couldn't even grow enough to satisfy the request.
     * Try one last GC, collecting SoftReferences this time.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 2\n", n);
    gcForExternalAlloc(true);  // collect SoftReferences
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (!ret) {
        LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n);
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.failedExternalAllocCount++;
        gDvm.allocProf.failedExternalAllocSize += n;
        if (self != NULL) {
            self->allocProf.failedExternalAllocCount++;
            self->allocProf.failedExternalAllocSize += n;
        }
    }
#endif

out:
    dvmUnlockHeap();

    return ret;
}

/*
 * Reduces the internal count of externally-allocated memory.
 */
void
dvmTrackExternalFree(size_t n)
{
    HeapSource *hs = gHs;
    size_t newExternalLimit;
    size_t oldExternalBytesAllocated;

    HSTRACE("EXTERNAL free %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    oldExternalBytesAllocated = hs->externalBytesAllocated;
    if (n <= hs->externalBytesAllocated) {
        hs->externalBytesAllocated -= n;
    } else {
        n = hs->externalBytesAllocated;
        hs->externalBytesAllocated = 0;
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.externalFreeCount++;
        gDvm.allocProf.externalFreeSize += n;
        if (self != NULL) {
            self->allocProf.externalFreeCount++;
            self->allocProf.externalFreeSize += n;
        }
    }
#endif

    /* Shrink as quickly as we can.
     */
    newExternalLimit = getUtilizationTarget(
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    if (newExternalLimit < oldExternalBytesAllocated) {
        /* Make sure that the remaining free space is at least
         * big enough to allocate something of the size that was
         * just freed.  This makes it more likely that
         *     externalFree(N); externalAlloc(N);
         * will work without causing a GC.
         */
        HSTRACE("EXTERNAL free preserved %zu extra free bytes\n",
                oldExternalBytesAllocated - newExternalLimit);
        newExternalLimit = oldExternalBytesAllocated;
    }
    if (newExternalLimit < hs->externalLimit) {
        hs->externalLimit = newExternalLimit;
    }

    dvmUnlockHeap();
}

/*
 * Returns the number of externally-allocated bytes being tracked by
 * dvmTrackExternalAllocation/Free().
 */
size_t
dvmGetExternalBytesAllocated()
{
    const HeapSource *hs = gHs;
    size_t ret;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();
    HS_BOILERPLATE();
    ret = hs->externalBytesAllocated;
    dvmUnlockHeap();

    return ret;
}

void *dvmHeapSourceGetImmuneLimit(GcMode mode)
{
    if (mode == GC_PARTIAL) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}