/* HeapSource.cpp, revision d862faa2ceae186da5518607505eb942d634ced9 */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/mspace.h>
#include <limits.h>   // for UINT_MAX, used by the SIZE_MAX fallback below
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>

#define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"

// TODO: find a real header file for these.
extern "C" int dlmalloc_trim(size_t);
extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
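
/* Illustrative numbers: DEFAULT_HEAP_UTILIZATION of 512/1024 is a 50%
 * target, i.e. after a GC the footprint is adjusted so that roughly half
 * of the heap holds live data, with the free portion clamped to the range
 * HEAP_MIN_FREE (512KB) .. HEAP_IDEAL_FREE (2MB); see getUtilizationTarget().
 */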

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
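
/* Worked example (illustrative): with an allocation limit of 8MB after a
 * GC, concurrentStartBytes becomes 8MB - 128KB, so the GC daemon is
 * signaled once bytesAllocated comes within 128KB of the limit.  A limit
 * below CONCURRENT_MIN_FREE (256KB) disables concurrent GC entirely; see
 * dvmHeapSourceGrowForUtilization().
 */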

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

#define DEBUG_HEAP_SOURCE 0
#if DEBUG_HEAP_SOURCE
#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
#else
#define HSTRACE(...)  /**/
#endif

struct Heap {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t maximumSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;
};

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t maximumSize;

    /*
     * The largest size we permit the heap to grow.  This value allows
     * the user to limit the heap growth below the maximum size.  This
     * is a workaround until we can dynamically set the maximum size.
     * This value can range between the starting size and the maximum
     * size but should never be set below the current footprint of the
     * heap.
     */
    size_t growthLimit;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t
getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_max_allowed_footprint(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;

//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
    if (ptr != NULL) {
        for (size_t i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    HeapSource *hs;

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    HeapSource *hs;
    size_t delta;

    delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;

static mspace
createMspace(void *base, size_t startSize, size_t maximumSize)
{
    mspace msp;

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     *
     * We start off reserving startSize/2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zu\n", startSize);
    errno = 0;
    msp = create_contiguous_mspace_with_base(startSize/2,
            maximumSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s\n",
            startSize/2, maximumSize, strerror(errno));
    }

    return msp;
}

/*
 * Add the initial heap.  Returns false if the initial heap was
 * already added to the heap source.
 */
static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
{
    assert(hs != NULL);
    assert(msp != NULL);
    if (hs->numHeaps != 0) {
        return false;
    }
    hs->heaps[0].msp = msp;
    hs->heaps[0].maximumSize = maximumSize;
    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
    hs->heaps[0].base = hs->heapBase;
    hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
    hs->numHeaps = 1;
    return true;
}

/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }

    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace msp = hs->heaps[0].msp;
    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}

/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (!gHs->gcThreadShutdown) {
        dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        dvmLockHeap();
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmCollectGarbageInternal(GC_CONCURRENT);
        dvmChangeStatus(NULL, THREAD_VMWAIT);
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}

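/*
 * Starts the GC daemon thread used for concurrent collections.
 * Returns true on success.
 */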
static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

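/*
 * Signals the GC daemon to exit and waits for it to finish.
 */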
static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}

/*
 * Create a stack big enough for the worst possible case, where the
 * heap is perfectly full of the smallest object.
 * TODO: be better about memory usage; use a smaller stack with
 *       overflow detection and recovery.
 */
static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
{
    const char *name = "dalvik-mark-stack";
    void *addr;

    assert(stack != NULL);
    stack->length = maximumSize * sizeof(Object*) /
        (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
    if (addr == NULL) {
        return false;
    }
    stack->base = (const Object **)addr;
    stack->limit = (const Object **)((char *)addr + stack->length);
    stack->top = NULL;
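    /* The stack is usually far emptier than this worst case; let the
     * kernel reclaim the backing pages until they are actually touched.
     */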
    madvise(stack->base, stack->length, MADV_DONTNEED);
    return true;
}

static void freeMarkStack(GcMarkStack *stack)
{
    assert(stack != NULL);
    munmap(stack->base, stack->length);
    memset(stack, 0, sizeof(*stack));
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap *
dvmHeapSourceStartup(size_t startSize, size_t maximumSize, size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        LOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, startSize, maximumSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)malloc(sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor\n");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = (HeapSource *)malloc(sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source\n");
        free(gcHeap);
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap\n");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits\n");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits\n");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        LOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}

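/*
 * Performs heap-source setup that must wait until after zygote mode
 * ends: starts the GC daemon when concurrent mark/sweep is enabled.
 */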
bool dvmHeapSourceStartupAfterZygote()
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool
dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGV("Splitting out new zygote heap\n");
        gDvm.newZygoteHeapAllocated = true;
        dvmClearCardTable();
        return addNewHeap(hs);
    }
    return true;
}

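/*
 * Shuts down the GC daemon thread, if one was started.
 */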
void dvmHeapSourceThreadShutdown()
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void
dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    assert(gcHeap != NULL);
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs = (*gcHeap)->heapSource;
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);
        freeMarkStack(&(*gcHeap)->markContext.stack);
        munmap(hs->heapBase, hs->heapLength);
        free(hs);
        gHs = NULL;
        free(*gcHeap);
        *gcHeap = NULL;
    }
}

/*
 * Gets the beginning of the allocation region for the HeapSource.
 */
void *dvmHeapSourceGetBase()
{
    return gHs->heapBase;
}

/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
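/* For example (illustrative), the total bytes allocated across all heaps
 * can be read without per-heap detail:
 *
 *   size_t total = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
 */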
size_t
dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                      size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}

void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, uintptr_t *limit,
                             size_t numHeaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(numHeaps <= hs->numHeaps);
    for (size_t i = 0; i < numHeaps; ++i) {
        base[i] = (uintptr_t)hs->heaps[i].base;
        if (max != NULL) {
            max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
        }
        if (limit != NULL) {
            limit[i] = (uintptr_t)hs->heaps[i].limit;
        }
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}

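/*
 * Exchanges the live and mark bitmaps.  Called at the end of a GC so
 * that the set of objects marked as reachable becomes the new live set.
 */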
void dvmHeapSourceSwapBitmaps()
{
    HeapBitmap tmp;

    tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}

void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    /*
     * Copy the contents of the live bit vector for immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (size_t i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the number of words to copy in the bitmap. */
            size_t index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            char *src = (char *)(gHs->liveBits.bits + index);
            char *dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            size_t length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void *
dvmHeapSourceAlloc(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;

    HS_BOILERPLATE();
    heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void *
heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    void *ptr;
    size_t max;

    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    max = heap->maximumSize;

    mspace_set_max_allowed_footprint(heap->msp, max);
    ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_max_allowed_footprint(heap->msp,
                                     mspace_footprint(heap->msp));
    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void *
dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;
    size_t oldIdealSize;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage. The list must contain addresses all in
 * the same mspace, and must be in increasing order. This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    Heap *heap;
    size_t numBytes;

    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    heap = ptr2heap(gHs, *ptrs);
    numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration around the loop below,
            // calling countFree on ptrs[0] and initializing merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], &numBytes);
            void *merged = ptrs[0];
            for (size_t i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool
dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr));
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool
dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

/*
 * Returns the value of the requested flag.
 */
bool
dvmHeapSourceGetPtrFlag(const void *ptr, HeapSourcePtrFlag flag)
{
    if (ptr == NULL) {
        return false;
    }

    if (flag == HS_CONTAINS) {
        return dvmHeapSourceContains(ptr);
    } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
        HeapSource *hs = gHs;

        HS_BOILERPLATE();

        if (hs->sawZygote) {
            Heap *heap;

            heap = ptr2heap(hs, ptr);
            if (heap != NULL) {
                /* If the object is not in the active heap, we assume that
                 * it was allocated as part of zygote.
                 */
                return heap != hs->heaps;
            }
        }
        /* The pointer is outside of any known heap, or we are not
         * running in zygote mode.
         */
        return false;
    }

    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t
dvmHeapSourceChunkSize(const void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

static size_t getMaximumSize(const HeapSource *hs)
{
    return hs->growthLimit;
}

/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();
    return getMaximumSize(gHs);
}

/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    size_t overhead;

    HS_BOILERPLATE();
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gHs->growthLimit = gHs->maximumSize;
    overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    dvmUnlockHeap();
}

/*
 * Return the real bytes used by old heaps plus the soft usage of the
 * current heap.  When a soft limit is in effect, this is effectively
 * what it's compared against (though, in practice, it only looks at
 * the current heap).
 */
static size_t
getSoftFootprint(bool includeActive)
{
    HeapSource *hs = gHs;
    size_t ret;

    HS_BOILERPLATE();

    ret = oldHeapOverhead(hs, false);
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t
dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void
setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void
setIdealFootprint(size_t max)
{
    HeapSource *hs = gHs;
#if DEBUG_HEAP_SOURCE
    HeapSource oldHs = *hs;
    mspace msp = hs->heaps[0].msp;
    size_t oldAllowedFootprint =
            mspace_max_allowed_footprint(msp);
#endif
    size_t maximumSize;

    HS_BOILERPLATE();

    maximumSize = getMaximumSize(hs);
    if (max > maximumSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB\n",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(maximumSize));
        max = maximumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;

    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d)\n",
            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
            mspace_max_allowed_footprint(msp) - oldAllowedFootprint);
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void
snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)\n",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
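/* Worked example (illustrative): with targetUtilization = 512 (50%) and a
 * 4MB live set, the raw target is (4MB / 512) * 1024 = 8MB; the free-space
 * cap then clamps it to liveSize + HEAP_IDEAL_FREE = 6MB.
 */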
static size_t
getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    size_t targetSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HeapSource *hs = gHs;
    Heap *heap;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;
    size_t newHeapMax;
    size_t overhead;
    size_t freeBytes;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
    targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

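    /* Decide whether the next collection can be concurrent.  Note that
     * "freeBytes" here is the allocation limit of the active heap; the
     * daemon is woken from dvmHeapSourceAlloc() once bytesAllocated
     * exceeds concurrentStartBytes.
     */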
    freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (isSoftLimited(hs)) {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(newHeapMax));
    } else {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead));
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}

/*
 * Return unused memory to the system if possible.
 */
void
dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps);

    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void
dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                      const void *userptr, size_t userlen,
                                      void *arg),
                  void *arg)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    for (size_t i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t
dvmHeapSourceGetNumHeaps()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->numHeaps;
}

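/*
 * Returns the address below which objects are immune during a partial
 * collection: the base of the active heap, so the older (zygote) heaps
 * are skipped.  Returns NULL for a full collection; see
 * dvmMarkImmuneObjects().
 */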
void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}