/* HeapSource.c, revision d25566d9278e6424e521f4b7148ac31480e60c5c */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/ashmem.h>
#include <cutils/mspace.h>
#include <limits.h>     // for INT_MAX
#include <sys/mman.h>
#include <errno.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"

// TODO: find a real header file for these.
extern int dlmalloc_trim(size_t);
extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint(void);
static void setIdealFootprint(size_t max);

#define ALIGN_UP_TO_PAGE_SIZE(p) \
    (((size_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & ~(SYSTEM_PAGE_SIZE - 1))
#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
    ((size_t)(p) & ~(SYSTEM_PAGE_SIZE - 1))
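
/* For example, assuming a typical SYSTEM_PAGE_SIZE of 4096 (illustrative
 * values only):
 *
 *     ALIGN_UP_TO_PAGE_SIZE(1)      == 4096
 *     ALIGN_UP_TO_PAGE_SIZE(4096)   == 4096
 *     ALIGN_UP_TO_PAGE_SIZE(4097)   == 8192
 *     ALIGN_DOWN_TO_PAGE_SIZE(4097) == 4096
 */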

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
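
/* Utilization ratios are fixed-point fractions of HEAP_UTILIZATION_MAX,
 * so DEFAULT_HEAP_UTILIZATION of 512/1024 targets a heap that is 50%
 * live data after a GC.  For example, a 1MB live set suggests a 2MB
 * ideal heap, subject to the HEAP_IDEAL_FREE/HEAP_MIN_FREE clamps
 * applied in getUtilizationTarget() below.
 */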

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

#define DEBUG_HEAP_SOURCE 0
#if DEBUG_HEAP_SOURCE
#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
#else
#define HSTRACE(...)  /**/
#endif

/*
=======================================================
=======================================================
=======================================================

How will this be used?
allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
    - if allocating in large doesn't work, try allocating from small
Heap.c will use HeapSource.h; HeapSource.c will do the right thing
    between small and large
    - some operations should be abstracted; put in a structure

How do we manage the size trade-offs?
- keep mspace max footprint clamped to actual footprint
- if small-alloc returns null, adjust large vs. small ratio
    - give small all available slack and retry
    - success or fail, snap back to actual footprint and give rest to large

managed as "small actual" + "large actual" + "delta to allowed total footprint"
- when allocating from one source or the other, give the delta to the
    active source, but snap back afterwards
- that may not work so great for a gc heap, because small will always consume.
    - but we need to use the memory, and the current max is the amount we
      need to fill before a GC.

Find a way to permanently steal pages from the middle of the heap
    - segment tricks?

Allocate String and char[] in a separate heap?

Maybe avoid growing small heap, even if there's slack?  Look at
live ratio of small heap after a gc; scale it based on that.

=======================================================
=======================================================
=======================================================
*/

typedef struct {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t absoluteMaxSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;
} Heap;

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* Requested minimum heap size, or zero if there is no minimum.
     */
    size_t minimumSize;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t absoluteMaxSize;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* External allocation count.
     */
    size_t externalBytesAllocated;

    /* The maximum number of external bytes that may be allocated.
     */
    size_t externalLimit;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The object (live, allocated) bitmap.
     */
    HeapBitmap objBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static inline bool
softLimited(const HeapSource *hs)
{
    /* softLimit will be either INT_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-INT_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static inline size_t
oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static inline Heap *
ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;
    size_t i;

//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
    if (ptr != NULL) {
        for (i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static inline void
countAllocation(Heap *heap, const void *ptr, bool isObj)
{
    HeapSource *hs;

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    if (isObj) {
        heap->objectsAllocated++;
        hs = gDvm.gcHeap->heapSource;
        dvmHeapBitmapSetObjectBit(&hs->objBits, ptr);
    }

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static inline void
countFree(Heap *heap, const void *ptr, bool isObj)
{
    HeapSource *hs;
    size_t delta;

    delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    if (isObj) {
        hs = gDvm.gcHeap->heapSource;
        dvmHeapBitmapClearObjectBit(&hs->objBits, ptr);
        if (heap->objectsAllocated > 0) {
            heap->objectsAllocated--;
        }
    }
}

static HeapSource *gHs = NULL;

static mspace
createMspace(void *base, size_t startSize, size_t absoluteMaxSize)
{
    mspace msp;

    /* Create an unlocked dlmalloc mspace to use as
     * a small-object heap source.
     *
     * We start off reserving startSize/2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %u\n", startSize);
    errno = 0;
    msp = create_contiguous_mspace_with_base(startSize/2,
            absoluteMaxSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%u,%u) (errno=%d)\n",
            startSize/2, absoluteMaxSize, errno);
    }

    return msp;
}

static bool
addNewHeap(HeapSource *hs, mspace msp, size_t mspAbsoluteMaxSize)
{
    Heap heap;
    void *base;

    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    if (msp != NULL) {
        heap.msp = msp;
        heap.absoluteMaxSize = mspAbsoluteMaxSize;
        heap.base = hs->heapBase;
        heap.limit = hs->heapBase + heap.absoluteMaxSize;
    } else {
        size_t overhead;

        overhead = ALIGN_UP_TO_PAGE_SIZE(oldHeapOverhead(hs, true));
        if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
            LOGE_HEAP("No room to create any more heaps "
                    "(%zd overhead, %zd max)\n",
                    overhead, hs->absoluteMaxSize);
            return false;
        }
        hs->heaps[0].absoluteMaxSize = overhead;
        heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
        base = contiguous_mspace_sbrk0(hs->heaps[0].msp);
        hs->heaps[0].limit = base;
        base = (void *)ALIGN_UP_TO_PAGE_SIZE(base);
        heap.msp = createMspace(base, HEAP_MIN_FREE, heap.absoluteMaxSize);
        heap.base = base;
        heap.limit = heap.base + heap.absoluteMaxSize;
        if (heap.msp == NULL) {
            return false;
        }
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    if (hs->numHeaps > 0) {
        mspace msp = hs->heaps[0].msp;
        mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
    }

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
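
/* A rough sketch (informative only) of the layout after a successful
 * msp == NULL call, which splits a new active heap out of the single
 * reserved region:
 *
 *   heapBase           brk (page-aligned)          heapBase + heapLength
 *      |                       |                             |
 *      +-----------------------+-----------------------------+
 *      | heaps[1]: old heap,   | heaps[0]: new active heap,  |
 *      | frozen at its current | may grow up to the end of   |
 *      | footprint             | the reservation             |
 *      +-----------------------+-----------------------------+
 */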

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap *
dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;
    int fd, ret;

    assert(gHs == NULL);

    if (startSize > absoluteMaxSize) {
        LOGE("Bad heap parameters (start=%zu, max=%zu)\n",
           startSize, absoluteMaxSize);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(absoluteMaxSize);
    fd = ashmem_create_region("the-java-heap", length);
    if (fd == -1) {
        return NULL;
    }
    base = mmap(NULL, length, PROT_NONE, MAP_PRIVATE, fd, 0);
    if (base == MAP_FAILED) {
        return NULL;
    }
    ret = close(fd);
    if (ret == -1) {
        goto fail;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * the small object heap source.
     */
    msp = createMspace(base, startSize, absoluteMaxSize);
    if (msp == NULL) {
        goto fail;
    }

    /* Allocate a descriptor from the heap we just created.
     */
    gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor\n");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = mspace_malloc(msp, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source\n");
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->minimumSize = 0;
    hs->startSize = startSize;
    hs->absoluteMaxSize = absoluteMaxSize;
    hs->idealSize = startSize;
    hs->softLimit = INT_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->heapBase = base;
    hs->heapLength = length;
    if (!addNewHeap(hs, msp, absoluteMaxSize)) {
        LOGE_HEAP("Can't add initial heap\n");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->objBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create objBits\n");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits\n");
        dvmHeapBitmapDelete(&hs->objBits);
        goto fail;
    }

    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    countAllocation(hs2heap(hs), gcHeap, false);
    countAllocation(hs2heap(hs), hs, false);

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool
dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGV("Splitting out new zygote heap\n");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs, NULL, 0);
    }
    return true;
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void
dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs;

        hs = (*gcHeap)->heapSource;

        assert((char *)*gcHeap >= hs->heapBase);
        assert((char *)*gcHeap < hs->heapBase + hs->heapLength);

        dvmHeapBitmapDelete(&hs->objBits);
        dvmHeapBitmapDelete(&hs->markBits);

        munmap(hs->heapBase, hs->heapLength);
        gHs = NULL;
        *gcHeap = NULL;
    }
}

/*
 * Returns the requested value.  If the per-heap stats are requested,
 * fills them in as well.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[],
                      size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;
    size_t i;

    HS_BOILERPLATE();

    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        return hs->externalBytesAllocated;
    case HS_EXTERNAL_LIMIT:
        return hs->externalLimit;
    default:
        // look at all heaps.
        ;
    }

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
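
/* An illustrative sketch of how a caller might read the per-heap
 * statistics (assumes the heap lock is held, as required above):
 *
 *     size_t perHeap[HEAP_SOURCE_MAX_HEAP_COUNT];
 *     size_t n = dvmHeapSourceGetNumHeaps();
 *     size_t total = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, perHeap, n);
 *     // total is the sum over all heaps; perHeap[0] is the active heap.
 */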
613
614static void aliasBitmap(HeapBitmap *dst, HeapBitmap *src,
615                        uintptr_t base, uintptr_t max) {
616    size_t offset;
617
618    dst->base = base;
619    dst->max = max;
620    dst->bitsLen = HB_OFFSET_TO_BYTE_INDEX(max - base);
621    dst->allocLen = dst->bitsLen;
622    offset = base - src->base;
623    assert(HB_OFFSET_TO_MASK(offset) == 1 << 31);
624    dst->bits = &src->bits[HB_OFFSET_TO_INDEX(offset)];
625}
626
627/*
628 * Initializes a vector of object and mark bits to the object and mark
629 * bits of to each heap.  The bits are aliased to the heapsource
630 * object and mark bitmaps.  This routine is used by the sweep code
631 * which needs to free each object in the correct heap.
632 */
633void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
634                                   size_t numHeaps)
635{
636    HeapSource *hs = gHs;
637    uintptr_t base, max;
638    size_t i, offset;
639
640    HS_BOILERPLATE();
641
642    assert(numHeaps == hs->numHeaps);
643    for (i = 0; i < hs->numHeaps; ++i) {
644        base = (uintptr_t)hs->heaps[i].base;
645        max = (uintptr_t)hs->heaps[i].limit;
646        aliasBitmap(&objBits[i], &hs->objBits, base, max);
647        aliasBitmap(&markBits[i], &hs->markBits, base, max);
648    }
649}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->objBits;
}

void dvmHeapSourceSwapBitmaps(void)
{
    HeapBitmap tmp;
    tmp = gHs->objBits;
    gHs->objBits = gHs->markBits;
    gHs->markBits = tmp;
    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(void)
{
    char *dst, *src;
    size_t i, index, length;

    /*
     * Copy the contents of the live bit vector for the immune object
     * range into the mark bit vector.
     */
    assert(gHs->objBits.base == gHs->markBits.base);
    assert(gHs->objBits.bitsLen == gHs->markBits.bitsLen);
    for (i = 1; i < gHs->numHeaps; ++i) {
        /* Compute the starting word index in the bit vectors. */
        index = HB_OFFSET_TO_INDEX((uintptr_t)gHs->heaps[i].base - gHs->objBits.base);
        /* Compute the starting addresses in the live and mark bits. */
        src = (char *)(gHs->objBits.bits + index);
        dst = (char *)(gHs->markBits.bits + index);
        /* Compute the number of bytes of the live bitmap to copy. */
        length = HB_OFFSET_TO_BYTE_INDEX(gHs->heaps[i].limit - gHs->heaps[i].base);
        /* Do the copy. */
        memcpy(dst, src, length);
        /* Make sure max points to the address of the highest set bit. */
        if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
            gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void *
dvmHeapSourceAlloc(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    if (heap->bytesAllocated + n <= hs->softLimit) {
// TODO: allocate large blocks (>64k?) as separate mmap regions so that
//       they don't increase the high-water mark when they're freed.
// TODO: zero out large objects using madvise
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr != NULL) {
            countAllocation(heap, ptr, true);
        }
    } else {
        /* This allocation would push us over the soft limit;
         * act as if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
                FRACTIONAL_MB(hs->softLimit), n);
        ptr = NULL;
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void *
heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    void *ptr;
    size_t max;

    /* Grow as much as possible, but don't let the real footprint
     * plus external allocations go over the absolute max.
     */
    max = heap->absoluteMaxSize;
    if (max > hs->externalBytesAllocated) {
        max -= hs->externalBytesAllocated;

        mspace_set_max_allowed_footprint(heap->msp, max);
        ptr = dvmHeapSourceAlloc(n);

        /* Shrink back down as small as possible.  Our caller may
         * readjust max_allowed to a more appropriate value.
         */
        mspace_set_max_allowed_footprint(heap->msp,
                mspace_footprint(heap->msp));
    } else {
        ptr = NULL;
    }

    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void *
dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;
    size_t oldIdealSize;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    oldIdealSize = hs->idealSize;
    if (softLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = INT_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at INT_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the memory pointed to by <ptr>, which may be NULL.
 */
void
dvmHeapSourceFree(void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        countFree(heap, ptr, true);
        /* Only free objects that are in the active heap.
         * Touching old heaps would pull pages into this process.
         */
        if (heap == gHs->heaps) {
            mspace_free(heap->msp, ptr);
        }
    }
}

/*
 * Frees the first numPtrs objects in the ptrs list. The list must
 * contain addresses all in the same mspace, and must be in increasing
 * order. This implies that there are no duplicates, and no entries
 * are NULL.
 */
void
dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    Heap *heap;

    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    heap = ptr2heap(gHs, *ptrs);
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration around the loop below,
            // calling countFree on ptrs[0] and initializing merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], true);
            void *merged = ptrs[0];

            size_t i;
            for (i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], true);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            size_t i;
            for (i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], true);
            }
        }
    }
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool
dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapBitmapCoversAddress(&gHs->objBits, ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->objBits, ptr) != 0;
    }
    return false;
}

/*
 * Returns the value of the requested flag.
 */
bool
dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    if (ptr == NULL) {
        return false;
    }

    if (flag == HS_CONTAINS) {
        return dvmHeapSourceContains(ptr);
    } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
        HeapSource *hs = gHs;

        HS_BOILERPLATE();

        if (hs->sawZygote) {
            Heap *heap;

            heap = ptr2heap(hs, ptr);
            if (heap != NULL) {
                /* If the object is not in the active heap, we assume that
                 * it was allocated as part of zygote.
                 */
                return heap != hs->heaps;
            }
        }
        /* The pointer is outside of any known heap, or we are not
         * running in zygote mode.
         */
        return false;
    }

    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t
dvmHeapSourceChunkSize(const void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

/*
 * Return the real bytes used by old heaps and external memory
 * plus the soft usage of the current heap.  When a soft limit
 * is in effect, this is effectively what it's compared against
 * (though, in practice, it only looks at the current heap).
 */
static size_t
getSoftFootprint(bool includeActive)
{
    HeapSource *hs = gHs;
    size_t ret;

    HS_BOILERPLATE();

    ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated;
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t
dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void
setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = INT_MAX;
    }
}

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void
setIdealFootprint(size_t max)
{
    HeapSource *hs = gHs;
#if DEBUG_HEAP_SOURCE
    HeapSource oldHs = *hs;
    mspace msp = hs->heaps[0].msp;
    size_t oldAllowedFootprint =
            mspace_max_allowed_footprint(msp);
#endif

    HS_BOILERPLATE();

    if (max > hs->absoluteMaxSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB\n",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(hs->absoluteMaxSize));
        max = hs->absoluteMaxSize;
    } else if (max < hs->minimumSize) {
        max = hs->minimumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps and external allocations will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;

    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
            "ext %zd\n",
            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
            mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
            hs->externalBytesAllocated);
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void
snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)\n",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * If set is true, sets the new minimum heap size to size; always
 * returns the current (or previous) size.  Setting size to zero
 * removes any current minimum constraint (size is unsigned, so it
 * can never be negative).
 */
size_t
dvmMinimumHeapSize(size_t size, bool set)
{
    HeapSource *hs = gHs;
    size_t oldMinimumSize;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.  We also need the
     * lock for the call to setIdealFootprint().
     */
    dvmLockHeap();

    HS_BOILERPLATE();

    oldMinimumSize = hs->minimumSize;

    if (set) {
        /* Don't worry about external allocations right now.
         * setIdealFootprint() will take them into account when
         * minimumSize is used, and it's better to hold onto the
         * intended minimumSize than to clamp it arbitrarily based
         * on the current allocations.
         */
        if (size > hs->absoluteMaxSize) {
            size = hs->absoluteMaxSize;
        }
        hs->minimumSize = size;
        if (size > hs->idealSize) {
            /* Force a snap to the minimum value, which we just set
             * and which setIdealFootprint() will take into consideration.
             */
            setIdealFootprint(hs->idealSize);
        }
        /* Otherwise we'll just keep it in mind the next time
         * setIdealFootprint() is called.
         */
    }

    dvmUnlockHeap();

    return oldMinimumSize;
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t
getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    size_t targetSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
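
/* Worked example: with the default targetUtilization of 512/1024 (50%),
 * a 4MB live set yields a raw target of 8MB.  The resulting 4MB of free
 * space exceeds HEAP_IDEAL_FREE (2MB), so the result is clamped down to
 * 4MB + HEAP_IDEAL_FREE = 6MB.
 */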

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HeapSource *hs = gHs;
    Heap *heap;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;
    size_t newHeapMax;
    size_t overhead;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* This is a hack to deal with the side-effects of moving
     * bitmap data out of the Dalvik heap.  Since the amount
     * of free space after a GC scales with the size of the
     * live set, many apps expected the large free space that
     * appeared along with megabytes' worth of bitmaps.  When
     * the bitmaps were removed, the free size shrank significantly,
     * and apps started GCing constantly.  This makes it so the
     * post-GC free space is the same size it would have been
     * if the bitmaps were still in the Dalvik heap.
     */
    currentHeapUsed += hs->externalBytesAllocated;
#endif
    targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    currentHeapUsed -= hs->externalBytesAllocated;
    targetHeapSize -= hs->externalBytesAllocated;
#endif

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (softLimited(hs)) {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated),
                FRACTIONAL_MB(newHeapMax));
    } else {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated));
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */

static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}

/*
 * Return unused memory to the system if possible.
 */
void
dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t nativeBytes, heapBytes;
    size_t i;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps);

    heapBytes = 0;
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void
dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                      const void *userptr, size_t userlen,
                                      void *arg),
                  void *arg)
{
    HeapSource *hs = gHs;
    size_t i;

    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    for (i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t
dvmHeapSourceGetNumHeaps()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->numHeaps;
}

/*
 * External allocation tracking
 *
 * In some situations, memory outside of the heap is tied to the
 * lifetime of objects in the heap.  Since that memory is kept alive
 * by heap objects, it should provide memory pressure that can influence
 * GCs.
 */

static bool
externalAllocPossible(const HeapSource *hs, size_t n)
{
    const Heap *heap;
    size_t currentHeapSize;

    /* Make sure that this allocation is even possible.
     * Don't let the external size plus the actual heap size
     * go over the absolute max.  This essentially treats
     * external allocations as part of the active heap.
     *
     * Note that this will fail "mysteriously" if there's
     * a small softLimit but a large heap footprint.
     */
    heap = hs2heap(hs);
    currentHeapSize = mspace_max_allowed_footprint(heap->msp);
    if (currentHeapSize + hs->externalBytesAllocated + n <=
            heap->absoluteMaxSize)
    {
        return true;
    }
    HSTRACE("externalAllocPossible(): "
            "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n",
            currentHeapSize, hs->externalBytesAllocated, n,
            heap->absoluteMaxSize,
            heap->absoluteMaxSize -
                    (currentHeapSize + hs->externalBytesAllocated));
    return false;
}

#define EXTERNAL_TARGET_UTILIZATION 820  // 80%

/*
 * Tries to update the internal count of externally-allocated memory.
 * If there's enough room for that memory, returns true.  If not, returns
 * false and does not update the count.
 *
 * The caller must ensure externalAllocPossible(hs, n) == true.
 */
static bool
externalAlloc(HeapSource *hs, size_t n, bool grow)
{
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : "");
    assert(externalAllocPossible(hs, n));  // The caller must ensure this.

    /* External allocations have their own "free space" that they
     * can allocate from without causing a GC.
     */
    if (hs->externalBytesAllocated + n <= hs->externalLimit) {
        hs->externalBytesAllocated += n;
#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.externalAllocCount++;
            gDvm.allocProf.externalAllocSize += n;
            if (self != NULL) {
                self->allocProf.externalAllocCount++;
                self->allocProf.externalAllocSize += n;
            }
        }
#endif
        return true;
    }
    if (!grow) {
        return false;
    }

    /* GROW */
    hs->externalBytesAllocated += n;
    hs->externalLimit = getUtilizationTarget(
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit);
    return true;
}

static void
gcForExternalAlloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER  // even if !PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXTERNAL_ALLOC);
}

/*
 * Updates the internal count of externally-allocated memory.  If there's
 * enough room for that memory, returns true.  If not, returns false and
 * does not update the count.
 *
 * May cause a GC as a side-effect.
 */
bool
dvmTrackExternalAllocation(size_t n)
{
    HeapSource *hs = gHs;
    bool ret = false;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    if (!externalAllocPossible(hs, n)) {
        LOGE_HEAP("%zd-byte external allocation "
                "too large for this process.\n", n);
        goto out;
    }

    /* Try "allocating" using the existing "free space".
     */
    HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* The "allocation" failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap source
     * if the live set is sufficiently large.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 1\n", n);
    gcForExternalAlloc(false);  // don't collect SoftReferences
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap source if necessary.
     */
    HSTRACE("EXTERNAL alloc %zd: frag\n", n);
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (ret) {
        goto out;
    }

    /* We couldn't even grow enough to satisfy the request.
     * Try one last GC, collecting SoftReferences this time.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 2\n", n);
    gcForExternalAlloc(true);  // collect SoftReferences
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (!ret) {
        LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n);
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.failedExternalAllocCount++;
        gDvm.allocProf.failedExternalAllocSize += n;
        if (self != NULL) {
            self->allocProf.failedExternalAllocCount++;
            self->allocProf.failedExternalAllocSize += n;
        }
    }
#endif

out:
    dvmUnlockHeap();

    return ret;
}

/*
 * Reduces the internal count of externally-allocated memory.
 */
void
dvmTrackExternalFree(size_t n)
{
    HeapSource *hs = gHs;
    size_t newExternalLimit;
    size_t oldExternalBytesAllocated;

    HSTRACE("EXTERNAL free %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    oldExternalBytesAllocated = hs->externalBytesAllocated;
    if (n <= hs->externalBytesAllocated) {
        hs->externalBytesAllocated -= n;
    } else {
        n = hs->externalBytesAllocated;
        hs->externalBytesAllocated = 0;
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.externalFreeCount++;
        gDvm.allocProf.externalFreeSize += n;
        if (self != NULL) {
            self->allocProf.externalFreeCount++;
            self->allocProf.externalFreeSize += n;
        }
    }
#endif

    /* Shrink as quickly as we can.
     */
    newExternalLimit = getUtilizationTarget(
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    if (newExternalLimit < oldExternalBytesAllocated) {
        /* Make sure that the remaining free space is at least
         * big enough to allocate something of the size that was
         * just freed.  This makes it more likely that
         *     externalFree(N); externalAlloc(N);
         * will work without causing a GC.
         */
        HSTRACE("EXTERNAL free preserved %zu extra free bytes\n",
                oldExternalBytesAllocated - newExternalLimit);
        newExternalLimit = oldExternalBytesAllocated;
    }
    if (newExternalLimit < hs->externalLimit) {
        hs->externalLimit = newExternalLimit;
    }

    dvmUnlockHeap();
}

/*
 * Returns the number of externally-allocated bytes being tracked by
 * dvmTrackExternalAllocation/Free().
 */
size_t
dvmGetExternalBytesAllocated()
{
    const HeapSource *hs = gHs;
    size_t ret;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();
    HS_BOILERPLATE();
    ret = hs->externalBytesAllocated;
    dvmUnlockHeap();

    return ret;
}

void *dvmHeapSourceGetImmuneLimit(GcMode mode)
{
    if (mode == GC_PARTIAL) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}