Copying.cpp revision 729eebbb4e4ec5b826b7230b4c02267da341b70b
1/*
2 * Copyright (C) 2009 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <errno.h>
18#include <limits.h>
19#include <sys/mman.h>
20
21#include "Dalvik.h"
22#include "alloc/Heap.h"
23#include "alloc/HeapBitmap.h"
24#include "alloc/HeapInternal.h"
25#include "alloc/HeapSource.h"
26#include "alloc/Verify.h"
27
28/*
29 * A "mostly copying", generational, garbage collector.
30 *
31 * TODO: we allocate our own contiguous tract of page frames to back
32 * object allocations.  To cooperate with other heaps active in the
33 * virtual machine we need to move the responsibility of allocating
34 * pages someplace outside of this code.
35 *
36 * The other major data structures that maintain the state of the heap
37 * are the block space table and the block queue.
38 *
39 * The block space table records the state of a block.  We must track
40 * whether a block is:
41 *
42 * - Free or allocated in some space.
43 *
44 * - If the block holds part of a large object allocation, whether the
45 *   block is the initial or a continued block of the allocation.
46 *
47 * - Whether the block is pinned, that is to say whether at least one
48 *   object in the block must remain stationary.  Only needed during a
49 *   GC.
50 *
51 * - Which space the block belongs to.  At present this means
52 *   from-space or to-space.
53 *
54 * The block queue is used during garbage collection.  Unlike Cheney's
55 * algorithm, from-space and to-space are not contiguous.  Therefore,
56 * one cannot maintain the state of the copy with just two pointers.
57 * The block queue exists to thread lists of blocks from the various
58 * spaces together.
59 *
60 * Additionally, we record the free space frontier of the heap, as
61 * well as the address of the first object within a block, which is
62 * required to copy objects following a large object (not currently
63 * implemented).  This is stored in the heap source structure.  This
64 * should be moved elsewhere to support in-line allocations from Java
65 * threads.
66 *
67 * Allocation requests are satisfied by reserving storage from one or
68 * more contiguous blocks.  Objects that are small enough to fit
69 * inside a block are packed together within a block.  Objects that
70 * are larger than a block are allocated from contiguous sequences of
71 * blocks.  When half the available blocks are filled, a garbage
72 * collection occurs.  We "flip" spaces (exchange from- and to-space),
73 * copy live objects into to-space, and perform pointer adjustment.
74 *
75 * Copying is made more complicated by the requirement that some
76 * objects must not be moved.  This property is known as "pinning".
77 * These objects must be dealt with specially.  We use Bartlett's
78 * scheme; blocks containing such objects are grayed (promoted) at the
79 * start of a garbage collection.  By virtue of this trick, tracing
80 * from the roots proceeds as usual but all objects on those pages are
81 * considered promoted and therefore not moved.
82 *
83 * TODO: there is sufficient information within the garbage collector
84 * to implement Attardi's scheme for evacuating unpinned objects from
85 * a page that is otherwise pinned.  This would eliminate false
86 * retention caused by the large pinning granularity.
87 *
88 * We need a scheme for medium and large objects.  Ignore that for
89 * now; we can return to this later.
90 *
91 * Eventually we need to worry about promoting objects out of the
92 * copy-collected heap (tenuring) into a less volatile space.  Copying
93 * may not always be the best policy for such spaces.  We should
94 * consider a variant of mark, sweep, compact.
95 *
96 * The block scheme allows us to use VM page faults to maintain a
97 * write barrier.  Consider having a special leaf state for a page.
98 *
99 * Bibliography:
100 *
101 * C. J. Cheney. 1970. A non-recursive list compacting
102 * algorithm. CACM. 13(11), 677-678.
103 *
104 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
105 * Ambiguous Roots. Digital Equipment Corporation.
106 *
107 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
108 * Generations and C++. Digital Equipment Corporation.
109 *
110 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
111 * Collection in Uncooperative Environments. Digital Equipment
112 * Corporation.
113 *
114 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
115 * Management Framework. TR-94-010
116 *
117 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
118 * memory management framework for C++. Software -- Practice and
119 * Experience. 28(11), 1143-1183.
120 *
121 */
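
/*
 * Illustrative sketch only (not the collector's actual entry point): a
 * rough outline, assumed from the description above, of how the routines
 * defined in this file fit together over one collection cycle.
 */
#if 0
static void exampleCollectionCycle()
{
    /* Exchange the roles of from-space and to-space. */
    dvmHeapSourceFlip();
    /* Pin objects that must not move; their blocks are promoted in place. */
    pinPrimitiveClasses();
    pinInternedStrings();
    /* Gray the roots by transporting root-reachable objects into to-space. */
    scavengeThreadList();
    scavengeInternedStrings();
    /* Blacken gray objects until the block queue drains. */
    scavengeBlockQueue();
    /* Reference and finalizer processing, then reclaim from-space. */
    preserveSoftReferences(&gDvm.gcHeap->softReferences);
    clearWhiteReferences(&gDvm.gcHeap->softReferences);
    clearWhiteReferences(&gDvm.gcHeap->weakReferences);
    processFinalizableReferences();
    clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
    clearFromSpace(gDvm.gcHeap->heapSource);
}
#endif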
122
123#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))
124
125#if 0
126#define LOG_ALLOC ALOGI
127#define LOG_PIN ALOGI
128#define LOG_PROM ALOGI
129#define LOG_REF ALOGI
130#define LOG_SCAV ALOGI
131#define LOG_TRAN ALOGI
132#define LOG_VER ALOGI
133#else
134#define LOG_ALLOC(...) ((void)0)
135#define LOG_PIN(...) ((void)0)
136#define LOG_PROM(...) ((void)0)
137#define LOG_REF(...) ((void)0)
138#define LOG_SCAV(...) ((void)0)
139#define LOG_TRAN(...) ((void)0)
140#define LOG_VER(...) ((void)0)
141#endif
142
143static void enqueueBlock(HeapSource *heapSource, size_t block);
144static void scavengeReference(Object **obj);
145static bool toSpaceContains(const void *addr);
146static bool fromSpaceContains(const void *addr);
147static size_t sumHeapBitmap(const HeapBitmap *bitmap);
148static size_t objectSize(const Object *obj);
149static void scavengeDataObject(Object *obj);
150static void scavengeBlockQueue();
151
152/*
153 * We use 512-byte blocks.
154 */
155enum { BLOCK_SHIFT = 9 };
156enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };
157
158/*
159 * Space identifiers, stored into the blockSpace array.
160 */
161enum {
162    BLOCK_FREE = 0,
163    BLOCK_FROM_SPACE = 1,
164    BLOCK_TO_SPACE = 2,
165    BLOCK_CONTINUED = 7
166};
167
168/*
169 * Alignment for all allocations, in bytes.
170 */
171enum { ALLOC_ALIGNMENT = 8 };
172
173/*
174 * Sentinel value for the queue end.
175 */
176#define QUEUE_TAIL (~(size_t)0)
177
178struct HeapSource {
179
180    /* The base address of backing store. */
181    u1 *blockBase;
182
183    /* Total number of blocks available for allocation. */
184    size_t totalBlocks;
185    size_t allocBlocks;
186
187    /*
188     * The scavenger work queue.  Implemented as an array of index
189     * values into the queue.
190     */
191    size_t *blockQueue;
192
193    /*
194     * Base and limit blocks.  Basically the shifted start address of
195     * the block.  We convert blocks to a relative number when
196     * indexing in the block queue.  TODO: make the block queue base
197     * relative rather than the index into the block queue.
198     */
199    size_t baseBlock, limitBlock;
200
201    size_t queueHead;
202    size_t queueTail;
203    size_t queueSize;
204
205    /* Per-block space byte: BLOCK_FREE, BLOCK_FROM_SPACE, BLOCK_TO_SPACE, or BLOCK_CONTINUED. */
206    char *blockSpace;
207
208    /* Start of free space in the current block. */
209    u1 *allocPtr;
210    /* Exclusive limit of free space in the current block. */
211    u1 *allocLimit;
212
213    HeapBitmap allocBits;
214
215    /*
216     * The starting size of the heap.  This value is the same as the
217     * value provided to the -Xms flag.
218     */
219    size_t minimumSize;
220
221    /*
222     * The maximum size of the heap.  This value is the same as the
223     * -Xmx flag.
224     */
225    size_t maximumSize;
226
227    /*
228     * The current, committed size of the heap.  At present, this is
229     * equivalent to the maximumSize.
230     */
231    size_t currentSize;
232
233    size_t bytesAllocated;
234};
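
/*
 * For a sense of scale (illustrative numbers, not a requirement): with a
 * 16 MiB maximumSize and 512-byte blocks, totalBlocks is 32768, so the
 * blockSpace table costs 32 KiB and, on a 32-bit target, the blockQueue
 * costs 32768 * sizeof(size_t) = 128 KiB.
 */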
235
236static unsigned long alignDown(unsigned long x, unsigned long n)
237{
238    return x & -n;
239}
240
241static unsigned long alignUp(unsigned long x, unsigned long n)
242{
243    return alignDown(x + (n - 1), n);
244}
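
/*
 * Both helpers assume n is a power of two.  For example,
 * alignDown(0x2A, 8) == 0x28 and alignUp(0x2A, 8) == 0x30, while values
 * that are already n-aligned are returned unchanged.
 */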
245
246static void describeBlocks(const HeapSource *heapSource)
247{
248    for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
249        if ((i % 32) == 0) putchar('\n');
250        printf("%d ", heapSource->blockSpace[i]);
251    }
252    putchar('\n');
253}
254
255/*
256 * Virtual memory interface.
257 */
258
259static void *virtualAlloc(size_t length)
260{
261    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
262    int prot = PROT_READ | PROT_WRITE;
263    void *addr = mmap(NULL, length, prot, flags, -1, 0);
264    if (addr == MAP_FAILED) {
265        LOGE_HEAP("mmap: %s", strerror(errno));
266        addr = NULL;
267    }
268    return addr;
269}
270
271static void virtualFree(void *addr, size_t length)
272{
273    assert(addr != NULL);
274    assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
275    int res = munmap(addr, length);
276    if (res == -1) {
277        LOGE_HEAP("munmap: %s", strerror(errno));
278    }
279}
280
281#ifndef NDEBUG
282static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
283{
284    size_t block;
285
286    block = (uintptr_t)addr >> BLOCK_SHIFT;
287    return heapSource->baseBlock <= block &&
288           heapSource->limitBlock > block;
289}
290#endif
291
292/*
293 * Iterate over the block map looking for a contiguous run of free
294 * blocks.
295 */
296static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
297{
298    size_t allocBlocks = heapSource->allocBlocks;
299    size_t totalBlocks = heapSource->totalBlocks;
300    /* Check underflow. */
301    assert(blocks != 0);
302    /* Check overflow. */
303    if (allocBlocks + blocks > totalBlocks / 2) {
304        return NULL;
305    }
306    /* Scan block map. */
307    for (size_t i = 0, j = 0; i < totalBlocks; ++i) {
308        /* Check fit. */
309        for (j = 0; j < blocks && i + j < totalBlocks; ++j) {
310            if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
311                break;
312            }
313        }
314        /* No fit? */
315        if (j != blocks) {
316            i += j;
317            continue;
318        }
319        /* Fit, allocate. */
320        heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
321        for (j = 1; j < blocks; ++j) {
322            heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
323        }
324        heapSource->allocBlocks += blocks;
325        void *addr = &heapSource->blockBase[i*BLOCK_SIZE];
326        memset(addr, 0, blocks*BLOCK_SIZE);
327        /* Collecting? */
328        if (heapSource->queueHead != QUEUE_TAIL) {
329            LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
330            /*
331             * This allocation was made on behalf of the transporter when it
332             * shaded a white object gray.  We enqueue the block so
333             * the scavenger can further shade the gray objects black.
334             */
335            enqueueBlock(heapSource, i);
336        }
337
338        return addr;
339    }
340    /* Insufficient space, fail. */
341    ALOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
342         heapSource->totalBlocks,
343         heapSource->allocBlocks,
344         heapSource->bytesAllocated);
345    return NULL;
346}
347
348/* Converts an absolute address to a relative block number. */
349static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
350{
351    assert(heapSource != NULL);
352    assert(isValidAddress(heapSource, addr));
353    return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
354}
355
356/* Converts a relative block number to an absolute address. */
357static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
358{
359    u1 *addr;
360
361    addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
362    assert(isValidAddress(heapSource, addr));
363    return addr;
364}
365
366static void clearBlock(HeapSource *heapSource, size_t block)
367{
368    assert(heapSource != NULL);
369    assert(block < heapSource->totalBlocks);
370    u1 *addr = heapSource->blockBase + block*BLOCK_SIZE;
371    memset(addr, 0xCC, BLOCK_SIZE);
372    for (size_t i = 0; i < BLOCK_SIZE; i += 8) {
373        dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
374    }
375}
376
377static void clearFromSpace(HeapSource *heapSource)
378{
379    assert(heapSource != NULL);
380    size_t i = 0;
381    size_t count = 0;
382    while (i < heapSource->totalBlocks) {
383        if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
384            ++i;
385            continue;
386        }
387        heapSource->blockSpace[i] = BLOCK_FREE;
388        clearBlock(heapSource, i);
389        ++i;
390        ++count;
391        while (i < heapSource->totalBlocks &&
392               heapSource->blockSpace[i] == BLOCK_CONTINUED) {
393            heapSource->blockSpace[i] = BLOCK_FREE;
394            clearBlock(heapSource, i);
395            ++i;
396            ++count;
397        }
398    }
399    LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
400}
401
402/*
403 * Appends the given block to the block queue.  The block queue is
404 * processed in-order by the scavenger.
405 */
406static void enqueueBlock(HeapSource *heapSource, size_t block)
407{
408    assert(heapSource != NULL);
409    assert(block < heapSource->totalBlocks);
410    if (heapSource->queueHead != QUEUE_TAIL) {
411        heapSource->blockQueue[heapSource->queueTail] = block;
412    } else {
413        heapSource->queueHead = block;
414    }
415    heapSource->blockQueue[block] = QUEUE_TAIL;
416    heapSource->queueTail = block;
417    ++heapSource->queueSize;
418}
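
/*
 * The scavenger drains this queue from the head.  A minimal sketch of the
 * matching dequeue operation (the name exampleDequeueBlock is hypothetical;
 * the in-tree consumer is scavengeBlockQueue):
 */
#if 0
static size_t exampleDequeueBlock(HeapSource *heapSource)
{
    assert(heapSource->queueHead != QUEUE_TAIL);
    size_t block = heapSource->queueHead;
    /* Follow the per-block link; QUEUE_TAIL means the queue is now empty. */
    heapSource->queueHead = heapSource->blockQueue[block];
    --heapSource->queueSize;
    return block;
}
#endif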
419
420/*
421 * Grays all objects within the block corresponding to the given
422 * address.
423 */
424static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
425{
426    size_t block;
427
428    block = addressToBlock(heapSource, (const u1 *)addr);
429    if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
430        // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
431        heapSource->blockSpace[block] = BLOCK_TO_SPACE;
432        enqueueBlock(heapSource, block);
433        /* TODO(cshapiro): count continued blocks?*/
434        heapSource->allocBlocks += 1;
435    } else {
436        // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
437    }
438}
439
440GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
441{
442    GcHeap* gcHeap;
443    HeapSource *heapSource;
444
445    assert(startSize <= absoluteMaxSize);
446
447    heapSource = calloc(1, sizeof(*heapSource));
448    assert(heapSource != NULL);
449
450    heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
451    heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);
452
453    heapSource->currentSize = heapSource->maximumSize;
454
455    /* Allocate underlying storage for blocks. */
456    heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
457    assert(heapSource->blockBase != NULL);
458    heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
459    heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;
460
461    heapSource->allocBlocks = 0;
462    heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);
463
464    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);
465
466    {
467        size_t size = sizeof(heapSource->blockQueue[0]);
468        heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
469        assert(heapSource->blockQueue != NULL);
470        memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
471        heapSource->queueHead = QUEUE_TAIL;
472    }
473
474    /* Byte indicating space residence or free status of block. */
475    {
476        size_t size = sizeof(heapSource->blockSpace[0]);
477        heapSource->blockSpace = calloc(1, heapSource->totalBlocks*size);
478        assert(heapSource->blockSpace != NULL);
479    }
480
481    dvmHeapBitmapInit(&heapSource->allocBits,
482                      heapSource->blockBase,
483                      heapSource->maximumSize,
484                      "blockBase");
485
486    /* Initialize allocation pointers. */
487    heapSource->allocPtr = allocateBlocks(heapSource, 1);
488    heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;
489
490    gcHeap = calloc(1, sizeof(*gcHeap));
491    assert(gcHeap != NULL);
492    gcHeap->heapSource = heapSource;
493
494    return gcHeap;
495}
496
497/*
498 * Perform any required heap initializations after forking from the
499 * zygote process.  This is a no-op for the time being.  Eventually
500 * this will demarcate the shared region of the heap.
501 */
502bool dvmHeapSourceStartupAfterZygote()
503{
504    return true;
505}
506
507bool dvmHeapSourceStartupBeforeFork()
508{
509    assert(!"implemented");
510    return false;
511}
512
513void dvmHeapSourceShutdown(GcHeap **gcHeap)
514{
515    if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
516        return;
517    free((*gcHeap)->heapSource->blockQueue);
518    free((*gcHeap)->heapSource->blockSpace);
519    virtualFree((*gcHeap)->heapSource->blockBase,
520                (*gcHeap)->heapSource->maximumSize);
521    free((*gcHeap)->heapSource);
522    (*gcHeap)->heapSource = NULL;
523    free(*gcHeap);
524    *gcHeap = NULL;
525}
526
527size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec,
528                             size_t perHeapStats[],
529                             size_t arrayLen)
530{
531    HeapSource *heapSource;
532    size_t value;
533
534    heapSource = gDvm.gcHeap->heapSource;
535    switch (spec) {
536    case HS_FOOTPRINT:
537        value = heapSource->maximumSize;
538        break;
539    case HS_ALLOWED_FOOTPRINT:
540        value = heapSource->maximumSize;
541        break;
542    case HS_BYTES_ALLOCATED:
543        value = heapSource->bytesAllocated;
544        break;
545    case HS_OBJECTS_ALLOCATED:
546        value = sumHeapBitmap(&heapSource->allocBits);
547        break;
548    default:
549        assert(!"implemented");
550        value = 0;
551    }
552    if (perHeapStats) {
553        *perHeapStats = value;
554    }
555    return value;
556}
557
558/*
559 * Performs a shallow copy of the allocation bitmap into the given
560 * vector of heap bitmaps.
561 */
562void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
563                                   size_t numHeaps)
564{
565    assert(!"implemented");
566}
567
568HeapBitmap *dvmHeapSourceGetLiveBits()
569{
570    return &gDvm.gcHeap->heapSource->allocBits;
571}
572
573/*
574 * Allocate the specified number of bytes from the heap.  The
575 * allocation cursor points into a block of free storage.  If the
576 * given allocation fits in the remaining space of the block, we
577 * advance the cursor and return a pointer to the free storage.  If
578 * the allocation cannot fit in the current block but is smaller than
579 * a block we request a new block and allocate from it instead.  If
580 * the allocation is larger than a block we must allocate from a span
581 * of contiguous blocks.
582 */
583void *dvmHeapSourceAlloc(size_t length)
584{
585    HeapSource *heapSource;
586    unsigned char *addr;
587    size_t aligned, available, blocks;
588
589    heapSource = gDvm.gcHeap->heapSource;
590    assert(heapSource != NULL);
591    assert(heapSource->allocPtr != NULL);
592    assert(heapSource->allocLimit != NULL);
593
594    aligned = alignUp(length, ALLOC_ALIGNMENT);
595    available = heapSource->allocLimit - heapSource->allocPtr;
596
597    /* Try allocating inside the current block. */
598    if (aligned <= available) {
599        addr = heapSource->allocPtr;
600        heapSource->allocPtr += aligned;
601        heapSource->bytesAllocated += aligned;
602        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
603        return addr;
604    }
605
606    /* Try allocating in a new block. */
607    if (aligned <= BLOCK_SIZE) {
608        addr =  allocateBlocks(heapSource, 1);
609        if (addr != NULL) {
610            heapSource->allocLimit = addr + BLOCK_SIZE;
611            heapSource->allocPtr = addr + aligned;
612            heapSource->bytesAllocated += aligned;
613            dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
614            /* TODO(cshapiro): pad out the current block. */
615        }
616        return addr;
617    }
618
619    /* Try allocating in a span of blocks. */
620    blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;
621
622    addr = allocateBlocks(heapSource, blocks);
623    /* Propagate failure upward. */
624    if (addr != NULL) {
625        heapSource->bytesAllocated += aligned;
626        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
627        /* TODO(cshapiro): pad out free space in the last block. */
628    }
629    return addr;
630}
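
/*
 * Concrete examples of the three paths above (sizes are illustrative): a
 * 20-byte request rounds up to 24 bytes and is carved from the current
 * block if 24 bytes remain; a 200-byte request that no longer fits in the
 * current block gets a fresh 512-byte block; a 1300-byte request rounds up
 * to alignUp(1304, 512) = 1536 bytes, i.e. a run of 3 contiguous blocks.
 */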
631
632void *dvmHeapSourceAllocAndGrow(size_t size)
633{
634    return dvmHeapSourceAlloc(size);
635}
636
637/* TODO: refactor along with dvmHeapSourceAlloc */
638void *allocateGray(size_t size)
639{
640    HeapSource *heapSource;
641    void *addr;
642    size_t block;
643
644    /* TODO: add a check that we are in a GC. */
645    heapSource = gDvm.gcHeap->heapSource;
646    addr = dvmHeapSourceAlloc(size);
647    assert(addr != NULL);
648    block = addressToBlock(heapSource, (const u1 *)addr);
649    if (heapSource->queueHead == QUEUE_TAIL) {
650        /*
651         * Forcibly append the underlying block to the queue.  This
652         * condition occurs when referents are transported following
653         * the initial trace.
654         */
655        enqueueBlock(heapSource, block);
656        LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
657    }
658    return addr;
659}
660
661bool dvmHeapSourceContainsAddress(const void *ptr)
662{
663    HeapSource *heapSource = gDvm.gcHeap->heapSource;
664    return dvmHeapBitmapCoversAddress(&heapSource->allocBits, ptr);
665}
666
667/*
668 * Returns true if the given address is within the heap and points to
669 * the header of a live object.
670 */
671bool dvmHeapSourceContains(const void *addr)
672{
673    HeapSource *heapSource;
674    HeapBitmap *bitmap;
675
676    heapSource = gDvm.gcHeap->heapSource;
677    bitmap = &heapSource->allocBits;
678    if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
679        return false;
680    } else {
681        return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
682    }
683}
684
685bool dvmHeapSourceGetPtrFlag(const void *ptr, HeapSourcePtrFlag flag)
686{
687    assert(!"implemented");
688    return false;
689}
690
691size_t dvmHeapSourceChunkSize(const void *ptr)
692{
693    assert(!"implemented");
694    return 0;
695}
696
697size_t dvmHeapSourceFootprint()
698{
699    assert(!"implemented");
700    return 0;
701}
702
703/*
704 * Returns the "ideal footprint", i.e. the number of
705 * bytes currently committed to the heap.  This starts out at the
706 * start size of the heap and grows toward the maximum size.
707 */
708size_t dvmHeapSourceGetIdealFootprint()
709{
710    return gDvm.gcHeap->heapSource->currentSize;
711}
712
713float dvmGetTargetHeapUtilization()
714{
715    return 0.5f;
716}
717
718void dvmSetTargetHeapUtilization(float newTarget)
719{
720    assert(newTarget > 0.0f && newTarget < 1.0f);
721}
722
723/*
724 * Expands the size of the heap after a collection.  At present we
725 * commit the pages for maximum size of the heap so this routine is
726 * just a no-op.  Eventually, we will either allocate or commit pages
727 * on an as-needed basis.
728 */
729void dvmHeapSourceGrowForUtilization()
730{
731    /* do nothing */
732}
733
734void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
735                                        const void *userptr, size_t userlen,
736                                        void *arg),
737                       void *arg)
738{
739    assert(!"implemented");
740}
741
742size_t dvmHeapSourceGetNumHeaps()
743{
744    return 1;
745}
746
747bool dvmTrackExternalAllocation(size_t n)
748{
749    /* do nothing */
750    return true;
751}
752
753void dvmTrackExternalFree(size_t n)
754{
755    /* do nothing */
756}
757
758size_t dvmGetExternalBytesAllocated()
759{
760    assert(!"implemented");
761    return 0;
762}
763
764void dvmHeapSourceFlip()
765{
766    HeapSource *heapSource = gDvm.gcHeap->heapSource;
767
768    /* Reset the block queue. */
769    heapSource->allocBlocks = 0;
770    heapSource->queueSize = 0;
771    heapSource->queueHead = QUEUE_TAIL;
772
773    /* TODO(cshapiro): pad the current (prev) block. */
774
775    heapSource->allocPtr = NULL;
776    heapSource->allocLimit = NULL;
777
778    /* Whiten all allocated blocks. */
779    for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
780        if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
781            heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
782        }
783    }
784}
785
786static void room(size_t *alloc, size_t *avail, size_t *total)
787{
788    HeapSource *heapSource = gDvm.gcHeap->heapSource;
789    *total = heapSource->totalBlocks*BLOCK_SIZE;
790    *alloc = heapSource->allocBlocks*BLOCK_SIZE;
791    *avail = *total - *alloc;
792}
793
794static bool isSpaceInternal(u1 *addr, int space)
795{
796    HeapSource *heapSource;
797    u1 *base, *limit;
798    size_t offset;
799    char space2;
800
801    heapSource = gDvm.gcHeap->heapSource;
802    base = heapSource->blockBase;
803    assert(addr >= base);
804    limit = heapSource->blockBase + heapSource->maximumSize;
805    assert(addr < limit);
806    offset = addr - base;
807    space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
808    return space == space2;
809}
810
811static bool fromSpaceContains(const void *addr)
812{
813    return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
814}
815
816static bool toSpaceContains(const void *addr)
817{
818    return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
819}
820
821/*
822 * Notifies the collector that the object at the given address must
823 * remain stationary during the current collection.
824 */
825static void pinObject(const Object *obj)
826{
827    promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
828}
829
830static size_t sumHeapBitmap(const HeapBitmap *bitmap)
831{
832    size_t sum = 0;
833    for (size_t i = 0; i < bitmap->bitsLen >> 2; ++i) {
834        sum += __builtin_popcountl(bitmap->bits[i]);  /* count set (object) bits */
835    }
836    return sum;
837}
838
839/*
840 * Miscellaneous functionality.
841 */
842
843static int isForward(const void *addr)
844{
845    return (uintptr_t)addr & 0x1;
846}
847
848static void setForward(const void *toObj, void *fromObj)
849{
850    *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
851}
852
853static void* getForward(const void *fromObj)
854{
855    return (void *)((uintptr_t)fromObj & ~0x1);
856}
857
858/* Beware: uses the same encoding as a forwarding pointer! */
859static int isPermanentString(const StringObject *obj) {
860    return (uintptr_t)obj & 0x1;
861}
862
863static void* getPermanentString(const StringObject *obj)
864{
865    return (void *)((uintptr_t)obj & ~0x1);
866}
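
/*
 * The forwarding encoding at a glance: after transport, the first word of
 * the from-space copy (normally its clazz pointer) is overwritten with the
 * to-space address with bit 0 set.  For example, if an object is copied to
 * 0x40001000, setForward() stores 0x40001001 into the old copy; isForward()
 * then sees bit 0 and getForward() masks it off to recover 0x40001000.
 * Because allocations are 8-byte aligned, bit 0 is never set in a genuine
 * class pointer.
 */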
867
868
869/*
870 * Scavenging and transporting routines follow.  A transporter grays
871 * an object.  A scavenger blackens an object.  We define these
872 * routines for each fundamental object type.  Dispatch is performed
873 * in scavengeObject.
874 */
875
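/*
 * In tri-color terms: an object is white while it sits in from-space with
 * its clazz word intact, gray once it has been transported into to-space
 * (or its block promoted in place) but its block is still waiting on the
 * block queue, and black once the scavenger has scanned it and updated the
 * references it contains.
 */
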
876/*
877 * Class object scavenging.
878 */
879static void scavengeClassObject(ClassObject *obj)
880{
881    LOG_SCAV("scavengeClassObject(obj=%p)", obj);
882    assert(obj != NULL);
883    assert(obj->obj.clazz != NULL);
884    assert(obj->obj.clazz->descriptor != NULL);
885    assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
886    assert(obj->descriptor != NULL);
887    LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
888             obj->descriptor, obj->vtableCount);
889    /* Delegate class object and instance field scavenging. */
890    scavengeDataObject((Object *)obj);
891    /* Scavenge the array element class object. */
892    if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
893        scavengeReference((Object **)(void *)&obj->elementClass);
894    }
895    /* Scavenge the superclass. */
896    scavengeReference((Object **)(void *)&obj->super);
897    /* Scavenge the class loader. */
898    scavengeReference(&obj->classLoader);
899    /* Scavenge static fields. */
900    for (int i = 0; i < obj->sfieldCount; ++i) {
901        char ch = obj->sfields[i].field.signature[0];
902        if (ch == '[' || ch == 'L') {
903            scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
904        }
905    }
906    /* Scavenge interface class objects. */
907    for (int i = 0; i < obj->interfaceCount; ++i) {
908        scavengeReference((Object **) &obj->interfaces[i]);
909    }
910}
911
912/*
913 * Array object scavenging.
914 */
915static size_t scavengeArrayObject(ArrayObject *array)
916{
917    LOG_SCAV("scavengeArrayObject(array=%p)", array);
918    /* Scavenge the class object. */
919    assert(toSpaceContains(array));
920    assert(array != NULL);
921    assert(array->obj.clazz != NULL);
922    scavengeReference((Object **) array);
923    size_t length = dvmArrayObjectSize(array);
924    /* Scavenge the array contents. */
925    if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
926        Object **contents = (Object **)array->contents;
927        for (size_t i = 0; i < array->length; ++i) {
928            scavengeReference(&contents[i]);
929        }
930    }
931    return length;
932}
933
934/*
935 * Reference object scavenging.
936 */
937
938static int getReferenceFlags(const Object *obj)
939{
940    int flags;
941
942    flags = CLASS_ISREFERENCE |
943            CLASS_ISWEAKREFERENCE |
944            CLASS_ISPHANTOMREFERENCE;
945    return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
946}
947
948static int isSoftReference(const Object *obj)
949{
950    return getReferenceFlags(obj) == CLASS_ISREFERENCE;
951}
952
953static int isWeakReference(const Object *obj)
954{
955    return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
956}
957
958#ifndef NDEBUG
959static bool isPhantomReference(const Object *obj)
960{
961    return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
962}
963#endif
964
965/*
966 * Returns true if the reference was registered with a reference queue
967 * but has not yet been appended to it.
968 */
969static bool isReferenceEnqueuable(const Object *ref)
970{
971    Object *queue, *queueNext;
972
973    queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
974    queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
975    if (queue == NULL || queueNext != NULL) {
976        /*
977         * There is no queue, or the reference has already
978         * been enqueued.  The Reference.enqueue() method
979         * will do nothing even if we call it.
980         */
981        return false;
982    }
983
984    /*
985     * We need to call enqueue(), but if we called it from
986     * here we'd probably deadlock.  Schedule a call.
987     */
988    return true;
989}
990
991/*
992 * Schedules a reference to be appended to its reference queue.
993 */
994static void enqueueReference(Object *ref)
995{
996    assert(ref != NULL);
997    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
998    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
999    if (!dvmHeapAddRefToLargeTable(&gDvm.gcHeap->referenceOperations, ref)) {
1000        ALOGE("no room for any more reference operations");
1001        dvmAbort();
1002    }
1003}
1004
1005/*
1006 * Sets the referent field of a reference object to NULL.
1007 */
1008static void clearReference(Object *obj)
1009{
1010    dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
1011}
1012
1013/*
1014 * Clears reference objects with white referents.
1015 */
1016void clearWhiteReferences(Object **list)
1017{
1018    size_t referentOffset, queueNextOffset;
1019    bool doSignal;
1020
1021    queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1022    referentOffset = gDvm.offJavaLangRefReference_referent;
1023    doSignal = false;
1024    while (*list != NULL) {
1025        Object *ref = *list;
1026        JValue *field = dvmFieldPtr(ref, referentOffset);
1027        Object *referent = field->l;
1028        *list = dvmGetFieldObject(ref, queueNextOffset);
1029        dvmSetFieldObject(ref, queueNextOffset, NULL);
1030        assert(referent != NULL);
1031        if (isForward(referent->clazz)) {
1032            field->l = referent = getForward(referent->clazz);
1033            continue;
1034        }
1035        if (fromSpaceContains(referent)) {
1036            /* Referent is white, clear it. */
1037            clearReference(ref);
1038            if (isReferenceEnqueuable(ref)) {
1039                enqueueReference(ref);
1040                doSignal = true;
1041            }
1042        }
1043    }
1044    /*
1045     * If we cleared a reference with a reference queue we must notify
1046     * the heap worker to append the reference.
1047     */
1048    if (doSignal) {
1049        dvmSignalHeapWorker(false);
1050    }
1051    assert(*list == NULL);
1052}
1053
1054/*
1055 * Blackens referents subject to the soft reference preservation
1056 * policy.
1057 */
1058void preserveSoftReferences(Object **list)
1059{
1060    Object *ref;
1061    Object *prev, *next;
1062    size_t referentOffset, queueNextOffset;
1063    unsigned counter;
1064    bool white;
1065
1066    queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1067    referentOffset = gDvm.offJavaLangRefReference_referent;
1068    counter = 0;
1069    prev = next = NULL;
1070    ref = *list;
1071    while (ref != NULL) {
1072        JValue *field = dvmFieldPtr(ref, referentOffset);
1073        Object *referent = field->l;
1074        next = dvmGetFieldObject(ref, queueNextOffset);
1075        assert(referent != NULL);
1076        if (isForward(referent->clazz)) {
1077            /* Referent is black. */
1078            field->l = referent = getForward(referent->clazz);
1079            white = false;
1080        } else {
1081            white = fromSpaceContains(referent);
1082        }
1083        if (white && ((++counter) & 1)) {
1084            /* Referent is white and biased toward saving, gray it. */
1085            scavengeReference((Object **)(void *)&field->l);
1086            white = false;
1087        }
1088        if (!white) {
1089            /* Referent is now black, unlink it. */
1090            if (prev != NULL) {
1091                dvmSetFieldObject(ref, queueNextOffset, NULL);
1092                dvmSetFieldObject(prev, queueNextOffset, next);
1093            }
1094        } else {
1095            /* Referent is still white, keep it for clearWhiteReferences. */
1096            prev = ref;
1097        }
1098        ref = next;
1099    }
1100    /*
1101     * Restart the trace with the newly gray references added to the
1102     * root set.
1103     */
1104    scavengeBlockQueue();
1105}
1106
1107void processFinalizableReferences()
1108{
1109    HeapRefTable newPendingRefs;
1110    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
1111    Object **ref;
1112    Object **lastRef;
1113    size_t totalPendCount;
1114
1115    /*
1116     * All strongly reachable objects are black.
1117     * Any white finalizable objects need to be finalized.
1118     */
1119
1120    /* Create a table that the new pending refs will
1121     * be added to.
1122     */
1123    if (!dvmHeapInitHeapRefTable(&newPendingRefs)) {
1124        //TODO: mark all finalizable refs and hope that
1125        //      we can schedule them next time.  Watch out,
1126        //      because we may be expecting to free up space
1127        //      by calling finalizers.
1128        LOG_REF("no room for pending finalizations");
1129        dvmAbort();
1130    }
1131
1132    /*
1133     * Walk through finalizableRefs and move any white references to
1134     * the list of new pending refs.
1135     */
1136    totalPendCount = 0;
1137    while (finRefs != NULL) {
1138        Object **gapRef;
1139        size_t newPendCount = 0;
1140
1141        gapRef = ref = finRefs->refs.table;
1142        lastRef = finRefs->refs.nextEntry;
1143        while (ref < lastRef) {
1144            if (fromSpaceContains(*ref)) {
1145                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
1146                    //TODO: add the current table and allocate
1147                    //      a new, smaller one.
1148                    LOG_REF("no room for any more pending finalizations: %zd",
1149                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
1150                    dvmAbort();
1151                }
1152                newPendCount++;
1153            } else {
1154                /* This ref is black, so will remain on finalizableRefs.
1155                 */
1156                if (newPendCount > 0) {
1157                    /* Copy it up to fill the holes.
1158                     */
1159                    *gapRef++ = *ref;
1160                } else {
1161                    /* No holes yet; don't bother copying.
1162                     */
1163                    gapRef++;
1164                }
1165            }
1166            ref++;
1167        }
1168        finRefs->refs.nextEntry = gapRef;
1169        //TODO: if the table is empty when we're done, free it.
1170        totalPendCount += newPendCount;
1171        finRefs = finRefs->next;
1172    }
1173    LOG_REF("%zd finalizers triggered.", totalPendCount);
1174    if (totalPendCount == 0) {
1175        /* No objects required finalization.
1176         * Free the empty temporary table.
1177         */
1178        dvmClearReferenceTable(&newPendingRefs);
1179        return;
1180    }
1181
1182    /* Add the new pending refs to the main list.
1183     */
1184    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
1185                &newPendingRefs))
1186    {
1187        LOG_REF("can't insert new pending finalizations");
1188        dvmAbort();
1189    }
1190
1191    //TODO: try compacting the main list with a memcpy loop
1192
1193    /* Blacken the refs we just moved; we don't want them or their
1194     * children to get swept yet.
1195     */
1196    ref = newPendingRefs.table;
1197    lastRef = newPendingRefs.nextEntry;
1198    assert(ref < lastRef);
1199    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
1200    while (ref < lastRef) {
1201        scavengeReference(ref);
1202        ref++;
1203    }
1204    HPROF_CLEAR_GC_SCAN_STATE();
1205    scavengeBlockQueue();
1206    dvmSignalHeapWorker(false);
1207}
1208
1209/*
1210 * If a reference points to from-space and has been forwarded, we snap
1211 * the pointer to its new to-space address.  If the reference points
1212 * to an unforwarded from-space address we must enqueue the reference
1213 * for later processing.  TODO: implement proper reference processing
1214 * and move the referent scavenging elsewhere.
1215 */
1216static void scavengeReferenceObject(Object *obj)
1217{
1218    Object *referent;
1219    Object **queue;
1220    size_t referentOffset, queueNextOffset;
1221
1222    assert(obj != NULL);
1223    LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
1224    scavengeDataObject(obj);
1225    referentOffset = gDvm.offJavaLangRefReference_referent;
1226    referent = dvmGetFieldObject(obj, referentOffset);
1227    if (referent == NULL || toSpaceContains(referent)) {
1228        return;
1229    }
1230    if (isSoftReference(obj)) {
1231        queue = &gDvm.gcHeap->softReferences;
1232    } else if (isWeakReference(obj)) {
1233        queue = &gDvm.gcHeap->weakReferences;
1234    } else {
1235        assert(isPhantomReference(obj));
1236        queue = &gDvm.gcHeap->phantomReferences;
1237    }
1238    queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1239    dvmSetFieldObject(obj, queueNextOffset, *queue);
1240    *queue = obj;
1241    LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
1242}
1243
1244/*
1245 * Data object scavenging.
1246 */
1247static void scavengeDataObject(Object *obj)
1248{
1249    // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
1250    assert(obj != NULL);
1251    assert(obj->clazz != NULL);
1252    assert(obj->clazz->objectSize != 0);
1253    assert(toSpaceContains(obj));
1254    /* Scavenge the class object. */
1255    ClassObject *clazz = obj->clazz;
1256    scavengeReference((Object **) obj);
1257    /* Scavenge instance fields. */
1258    if (clazz->refOffsets != CLASS_WALK_SUPER) {
1259        size_t refOffsets = clazz->refOffsets;
1260        while (refOffsets != 0) {
1261            size_t rshift = CLZ(refOffsets);
1262            size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
1263            Object **ref = (Object **)((u1 *)obj + offset);
1264            scavengeReference(ref);
1265            refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
1266        }
1267    } else {
1268        for (; clazz != NULL; clazz = clazz->super) {
1269            InstField *field = clazz->ifields;
1270            for (int i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
1271                size_t offset = field->byteOffset;
1272                Object **ref = (Object **)((u1 *)obj + offset);
1273                scavengeReference(ref);
1274            }
1275        }
1276    }
1277}
1278
1279static Object *transportObject(const Object *fromObj)
1280{
1281    Object *toObj;
1282    size_t allocSize, copySize;
1283
1284    LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
1285                  fromObj,
1286                  gDvm.gcHeap->heapSource->allocBlocks);
1287    assert(fromObj != NULL);
1288    assert(fromSpaceContains(fromObj));
1289    allocSize = copySize = objectSize(fromObj);
1290    if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
1291        /*
1292         * The object has been hashed or hashed and moved.  We must
1293         * reserve an additional word for a hash code.
1294         */
1295        allocSize += sizeof(u4);
1296    }
1297    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1298        /*
1299         * The object has its hash code allocated.  Ensure the hash
1300         * code is copied along with the instance data.
1301         */
1302        copySize += sizeof(u4);
1303    }
1304    /* TODO(cshapiro): don't copy, re-map large data objects. */
1305    assert(copySize <= allocSize);
1306    toObj = allocateGray(allocSize);
1307    assert(toObj != NULL);
1308    assert(toSpaceContains(toObj));
1309    memcpy(toObj, fromObj, copySize);
1310    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
1311        /*
1312         * The object has had its hash code exposed.  Append it to the
1313         * instance and set a bit so we know to look for it there.
1314         */
1315        *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
1316        toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
1317    }
1318    LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
1319             fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
1320             toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
1321             copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
1322    return toObj;
1323}
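
/*
 * Worked example of the hash handling above (sizes are illustrative): a
 * 16-byte object in state LW_HASH_STATE_HASHED copies 16 bytes into a
 * 20-byte reservation, a hash word derived from its original address is
 * appended at offset 16, and the copy is marked
 * LW_HASH_STATE_HASHED_AND_MOVED.  If that copy is transported again,
 * both copySize and allocSize are 20 and the stored hash word travels
 * with the memcpy.
 */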
1324
1325/*
1326 * Generic reference scavenging.
1327 */
1328
1329/*
1330 * Given a reference to an object, the scavenge routine will gray the
1331 * referent.  A white referent is transported to to-space and a
1332 * forwarding pointer is installed in the header of the from-space
1333 * copy.
1334 */
1335
1336/*
1337 * Blacken the given pointer.  If the pointer is in from space, it is
1338 * transported to new space.  If the object has a forwarding pointer
1339 * installed it has already been transported and the referent is
1340 * snapped to the new address.
1341 */
1342static void scavengeReference(Object **obj)
1343{
1344    ClassObject *clazz;
1345    Object *fromObj, *toObj;
1346
1347    assert(obj);
1348
1349    if (*obj == NULL) return;
1350
1351    assert(dvmIsValidObject(*obj));
1352
1353    /* The entire block is black. */
1354    if (toSpaceContains(*obj)) {
1355        LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
1356        return;
1357    }
1358    LOG_SCAV("scavengeReference(*obj=%p)", *obj);
1359
1360    assert(fromSpaceContains(*obj));
1361
1362    clazz = (*obj)->clazz;
1363
1364    if (isForward(clazz)) {
1365        // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
1366        *obj = (Object *)getForward(clazz);
1367        return;
1368    }
1369    fromObj = *obj;
1370    if (clazz == NULL) {
1371        // LOG_SCAV("scavangeReference %p has a NULL class object", fromObj);
1372        assert(!"implemented");
1373        toObj = NULL;
1374    } else {
1375        toObj = transportObject(fromObj);
1376    }
1377    setForward(toObj, fromObj);
1378    *obj = (Object *)toObj;
1379}
1380
1381/*
1382 * Generic object scavenging.
1383 */
1384static void scavengeObject(Object *obj)
1385{
1386    ClassObject *clazz;
1387
1388    assert(obj != NULL);
1389    assert(obj->clazz != NULL);
1390    assert(!((uintptr_t)obj->clazz & 0x1));
1391    clazz = obj->clazz;
1392    if (dvmIsTheClassClass(clazz)) {
1393        scavengeClassObject((ClassObject *)obj);
1394    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
1395        scavengeArrayObject((ArrayObject *)obj);
1396    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
1397        scavengeReferenceObject(obj);
1398    } else {
1399        scavengeDataObject(obj);
1400    }
1401}
1402
1403/*
1404 * External root scavenging routines.
1405 */
1406
1407static void pinHashTableEntries(HashTable *table)
1408{
1409    LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
1410    if (table == NULL) {
1411        return;
1412    }
1413    dvmHashTableLock(table);
1414    for (int i = 0; i < table->tableSize; ++i) {
1415        HashEntry *entry = &table->pEntries[i];
1416        void *obj = entry->data;
1417        if (obj == NULL || obj == HASH_TOMBSTONE) {
1418            continue;
1419        }
1420        pinObject(entry->data);
1421    }
1422    dvmHashTableUnlock(table);
1423    LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
1424}
1425
1426static void pinPrimitiveClasses()
1427{
1428    size_t length = ARRAYSIZE(gDvm.primitiveClass);
1429    for (size_t i = 0; i < length; i++) {
1430        if (gDvm.primitiveClass[i] != NULL) {
1431            pinObject((Object *)gDvm.primitiveClass[i]);
1432        }
1433    }
1434}
1435
1436/*
1437 * Scavenge interned strings.  Permanent interned strings will have
1438 * been pinned and are therefore ignored.  Non-permanent strings that
1439 * have been forwarded are snapped.  All other entries are removed.
1440 */
1441static void scavengeInternedStrings()
1442{
1443    HashTable *table = gDvm.internedStrings;
1444    if (table == NULL) {
1445        return;
1446    }
1447    dvmHashTableLock(table);
1448    for (int i = 0; i < table->tableSize; ++i) {
1449        HashEntry *entry = &table->pEntries[i];
1450        Object *obj = (Object *)entry->data;
1451        if (obj == NULL || obj == HASH_TOMBSTONE) {
1452            continue;
1453        } else if (!isPermanentString((StringObject *)obj)) {
1454            // LOG_SCAV("entry->data=%p", entry->data);
1455            LOG_SCAV(">>> string obj=%p", entry->data);
1456            /* TODO(cshapiro): detach white string objects */
1457            scavengeReference((Object **)(void *)&entry->data);
1458            LOG_SCAV("<<< string obj=%p", entry->data);
1459        }
1460    }
1461    dvmHashTableUnlock(table);
1462}
1463
1464static void pinInternedStrings()
1465{
1466    HashTable *table = gDvm.internedStrings;
1467    if (table == NULL) {
1468        return;
1469    }
1470    dvmHashTableLock(table);
1471    for (int i = 0; i < table->tableSize; ++i) {
1472        HashEntry *entry = &table->pEntries[i];
1473        Object *obj = (Object *)entry->data;
1474        if (obj == NULL || obj == HASH_TOMBSTONE) {
1475            continue;
1476        } else if (isPermanentString((StringObject *)obj)) {
1477            obj = (Object *)getPermanentString((StringObject*)obj);
1478            LOG_PROM(">>> pin string obj=%p", obj);
1479            pinObject(obj);
1480            LOG_PROM("<<< pin string obj=%p", obj);
1481        }
1482     }
1483    dvmHashTableUnlock(table);
1484}
1485
1486/*
1487 * At present, reference tables contain references that must not be
1488 * moved by the collector.  Instead of scavenging each reference in
1489 * the table we pin each referenced object.
1490 */
1491static void pinReferenceTable(const ReferenceTable *table)
1492{
1493    assert(table != NULL);
1494    assert(table->table != NULL);
1495    assert(table->nextEntry != NULL);
1496    for (Object **entry = table->table; entry < table->nextEntry; ++entry) {
1497        assert(entry != NULL);
1498        assert(!isForward(*entry));
1499        pinObject(*entry);
1500    }
1501}
1502
1503static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
1504{
1505    for (; table != NULL; table = table->next) {
1506        Object **ref = table->refs.table;
1507        for (; ref < table->refs.nextEntry; ++ref) {
1508            scavengeReference(ref);
1509        }
1510    }
1511}
1512
1513/* This code was copied from Thread.c */
1514static void scavengeThreadStack(Thread *thread)
1515{
1516    const u4 *framePtr;
1517#if WITH_EXTRA_GC_CHECKS > 1
1518    bool first = true;
1519#endif
1520
1521    framePtr = (const u4 *)thread->interpSave.curFrame;
1522    while (framePtr != NULL) {
1523        const StackSaveArea *saveArea;
1524        const Method *method;
1525
1526        saveArea = SAVEAREA_FROM_FP(framePtr);
1527        method = saveArea->method;
1528        if (method != NULL && !dvmIsNativeMethod(method)) {
1529#ifdef COUNT_PRECISE_METHODS
1530            /* the GC is running, so no lock required */
1531            if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
1532                LOG_SCAV("PGC: added %s.%s %p",
1533                             method->clazz->descriptor, method->name, method);
1534#endif
1535#if WITH_EXTRA_GC_CHECKS > 1
1536            /*
1537             * May also want to enable the memset() in the "invokeMethod"
1538             * goto target in the portable interpreter.  That sets the stack
1539             * to a pattern that makes referring to uninitialized data
1540             * very obvious.
1541             */
1542
1543            if (first) {
1544                /*
1545                 * First frame, isn't native, check the "alternate" saved PC
1546                 * as a sanity check.
1547                 *
1548                 * It seems like we could check the second frame if the first
1549                 * is native, since the PCs should be the same.  It turns out
1550                 * this doesn't always work.  The problem is that we could
1551                 * have calls in the sequence:
1552                 *   interp method #2
1553                 *   native method
1554                 *   interp method #1
1555                 *
1556                 * and then GC while in the native method after returning
1557                 * from interp method #2.  The currentPc on the stack is
1558                 * for interp method #1, but thread->currentPc2 is still
1559                 * set for the last thing interp method #2 did.
1560                 *
1561                 * This can also happen in normal execution:
1562                 * - sget-object on not-yet-loaded class
1563                 * - class init updates currentPc2
1564                 * - static field init is handled by parsing annotations;
1565                 *   static String init requires creation of a String object,
1566                 *   which can cause a GC
1567                 *
1568                 * Essentially, any pattern that involves executing
1569                 * interpreted code and then causes an allocation without
1570                 * executing instructions in the original method will hit
1571                 * this.  These are rare enough that the test still has
1572                 * some value.
1573                 */
1574                if (saveArea->xtra.currentPc != thread->currentPc2) {
1575                    ALOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p",
1576                        saveArea->xtra.currentPc, thread->currentPc2,
1577                        method->clazz->descriptor, method->name, method->insns);
1578                    if (saveArea->xtra.currentPc != NULL)
1579                        ALOGE("  pc inst = 0x%04x", *saveArea->xtra.currentPc);
1580                    if (thread->currentPc2 != NULL)
1581                        ALOGE("  pc2 inst = 0x%04x", *thread->currentPc2);
1582                    dvmDumpThread(thread, false);
1583                }
1584            } else {
1585                /*
1586                 * It's unusual, but not impossible, for a non-first frame
1587                 * to be at something other than a method invocation.  For
1588                 * example, if we do a new-instance on a nonexistent class,
1589                 * we'll have a lot of class loader activity on the stack
1590                 * above the frame with the "new" operation.  Could also
1591                 * happen while we initialize a Throwable when an instruction
1592                 * fails.
1593                 *
1594                 * So there's not much we can do here to verify the PC,
1595                 * except to verify that it's a GC point.
1596                 */
1597            }
1598            assert(saveArea->xtra.currentPc != NULL);
1599#endif
1600
1601            const RegisterMap* pMap;
1602            const u1* regVector;
1603
1604            Method* nonConstMethod = (Method*) method;  // quiet gcc
1605            pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1606
1607            //LOG_SCAV("PGC: %s.%s", method->clazz->descriptor, method->name);
1608
1609            if (pMap != NULL) {
1610                /* found map, get registers for this address */
1611                int addr = saveArea->xtra.currentPc - method->insns;
1612                regVector = dvmRegisterMapGetLine(pMap, addr);
1613                /*
1614                if (regVector == NULL) {
1615                    LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x",
1616                                 method->clazz->descriptor, method->name, addr);
1617                } else {
1618                    LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)",
1619                                 method->clazz->descriptor, method->name, addr,
1620                                 thread->threadId);
1621                }
1622                */
1623            } else {
1624                /*
1625                 * No map found.  If precise GC is disabled this is
1626                 * expected -- we don't create pointers to the map data even
1627                 * if it's present -- but if it's enabled it means we're
1628                 * unexpectedly falling back on a conservative scan, so it's
1629                 * worth yelling a little.
1630                 */
1631                if (gDvm.preciseGc) {
1632                    LOG_SCAV("PGC: no map for %s.%s", method->clazz->descriptor, method->name);
1633                }
1634                regVector = NULL;
1635            }
1636            if (regVector == NULL) {
1637                /*
1638                 * There are no roots to scavenge.  Skip over the entire frame.
1639                 */
1640                framePtr += method->registersSize;
1641            } else {
1642                /*
1643                 * Precise scan.  v0 is at the lowest address on the
1644                 * interpreted stack, and is the first bit in the register
1645                 * vector, so we can walk through the register map and
1646                 * memory in the same direction.
1647                 *
1648                 * A '1' bit indicates a live reference.
1649                 */
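                /*
                 * Illustration (editorial, not in the original source): for a
                 * hypothetical method with registersSize == 10 in which only
                 * v1 and v3 hold references at this PC, the map line starts
                 *
                 *     regVector[0] == 0x0a   // bits 1 and 3 set: v1 and v3
                 *     regVector[1] == 0x00   // v8..v9 dead; high bits unused
                 *
                 * The loop below shifts one bit out of 'bits' per register;
                 * once only the 0x100 sentinel remains (shifted down to 1),
                 * the next map byte is fetched.
                 */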
1650                u2 bits = 1 << 1;
1651                for (int i = method->registersSize - 1; i >= 0; i--) {
1652                    u4 rval = *framePtr;
1653
1654                    bits >>= 1;
1655                    if (bits == 1) {
1656                        /* set bit 8 (0x100) as a sentinel so we can tell when the byte is exhausted */
1657                        bits = *regVector++ | 0x0100;
1658                    }
1659
1660                    if (rval != 0 && (bits & 0x01) != 0) {
1661                        /*
1662                         * Non-null, register marked as live reference.  This
1663                         * should always be a valid object.
1664                         */
1665#if WITH_EXTRA_GC_CHECKS > 0
1666                        if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1667                            /* this is very bad */
1668                            ALOGE("PGC: invalid ref in reg %d: 0x%08x",
1669                                method->registersSize-1 - i, rval);
1670                        } else
1671#endif
1672                        {
1673
1674                            // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
1675                            /* dvmMarkObjectNonNull((Object *)rval); */
1676                            scavengeReference((Object **) framePtr);
1677                        }
1678                    } else {
1679                        /*
1680                         * Null or non-reference, do nothing at all.
1681                         */
1682#if WITH_EXTRA_GC_CHECKS > 1
1683                        if (dvmIsValidObject((Object*) rval)) {
1684                            /* this is normal, but we feel chatty */
1685                            ALOGD("PGC: ignoring valid ref in reg %d: 0x%08x",
1686                                 method->registersSize-1 - i, rval);
1687                        }
1688#endif
1689                    }
1690                    ++framePtr;
1691                }
1692                dvmReleaseRegisterMapLine(pMap, regVector);
1693            }
1694        }
1695        /* else this is a break frame and there is nothing to gray, or
1696         * this is a native method and the registers are just the "ins",
1697         * copied from various registers in the caller's set.
1698         */
1699
1700#if WITH_EXTRA_GC_CHECKS > 1
1701        first = false;
1702#endif
1703
1704        /* Don't fall into an infinite loop if things get corrupted.
1705         */
1706        assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1707               saveArea->prevFrame == NULL);
1708        framePtr = saveArea->prevFrame;
1709    }
1710}
1711
1712static void scavengeThread(Thread *thread)
1713{
1714    // LOG_SCAV("scavengeThread(thread=%p)", thread);
1715
1716    // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
1717    scavengeReference(&thread->threadObj);
1718
1719    // LOG_SCAV("Scavenging exception=%p", thread->exception);
1720    scavengeReference(&thread->exception);
1721
1722    scavengeThreadStack(thread);
1723}
1724
1725static void scavengeThreadList()
1726{
1727    Thread *thread;
1728
1729    dvmLockThreadList(dvmThreadSelf());
1730    thread = gDvm.threadList;
1731    while (thread) {
1732        scavengeThread(thread);
1733        thread = thread->next;
1734    }
1735    dvmUnlockThreadList();
1736}
1737
1738static void pinThreadStack(const Thread *thread)
1739{
1740    const u4 *framePtr;
1741    const StackSaveArea *saveArea;
1742    Method *method;
1743    const char *shorty;
1744    Object *obj;
1745
1746    saveArea = NULL;
1747    framePtr = (const u4 *)thread->interpSave.curFrame;
1748    for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
1749        saveArea = SAVEAREA_FROM_FP(framePtr);
1750        method = (Method *)saveArea->method;
1751        if (method != NULL && dvmIsNativeMethod(method)) {
1752            /*
1753             * This is a native method; pin its arguments.
1754             *
1755             * For purposes of graying references, we don't need to do
1756             * anything here, because all of the native "ins" were copied
1757             * from registers in the caller's stack frame and won't be
1758             * changed (an interpreted method can freely reuse its parameter
1759             * registers like any other register, but natives don't work
1760             * that way).
1761             *
1762             * However, we need to ensure that references visible to
1763             * native methods don't move around.  We can do a precise scan
1764             * of the arguments by examining the method signature.
1765             */
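            /*
             * Illustration (editorial): for a hypothetical non-static native
             * method  void foo(Object o, long l, int i)  the shorty is "VLJI".
             * The scan below skips the return-type character 'V', pins the
             * "this" reference, then matches the remaining characters against
             * the ins: 'L' pins the object argument, 'J' consumes an extra
             * register slot for the wide value, and 'I' is skipped as a
             * non-reference word.
             */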
1766            LOG_PIN("+++ native scan %s.%s",
1767                    method->clazz->descriptor, method->name);
1768            assert(method->registersSize == method->insSize);
1769            if (!dvmIsStaticMethod(method)) {
1770                /* grab the "this" pointer */
1771                obj = (Object *)*framePtr++;
1772                if (obj == NULL) {
1773                    /*
1774                     * This can happen for the "fake" entry frame inserted
1775                     * for threads created outside the VM.  There's no actual
1776                     * call so there's no object.  If we changed the fake
1777                     * entry method to be declared "static" then this
1778                     * situation should never occur.
1779                     */
1780                } else {
1781                    assert(dvmIsValidObject(obj));
1782                    pinObject(obj);
1783                }
1784            }
1785            shorty = method->shorty+1;      // skip return value
1786            for (int i = method->registersSize - 1; i >= 0; i--, framePtr++) {
1787                switch (*shorty++) {
1788                case 'L':
1789                    obj = (Object *)*framePtr;
1790                    if (obj != NULL) {
1791                        assert(dvmIsValidObject(obj));
1792                        pinObject(obj);
1793                    }
1794                    break;
1795                case 'D':
1796                case 'J':
1797                    framePtr++;
1798                    break;
1799                default:
1800                    /* 32-bit non-reference value */
1801                    obj = (Object *)*framePtr;          // debug, remove
1802                    if (dvmIsValidObject(obj)) {        // debug, remove
1803                        /* if we see a lot of these, our scan might be off */
1804                        LOG_PIN("+++ did NOT pin obj %p", obj);
1805                    }
1806                    break;
1807                }
1808            }
1809        } else if (method != NULL && !dvmIsNativeMethod(method)) {
1810            const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
1811            const u1* regVector = NULL;
1812
1813            ALOGI("conservative: %s.%s", method->clazz->descriptor, method->name);
1814
1815            if (pMap != NULL) {
1816                int addr = saveArea->xtra.currentPc - method->insns;
1817                regVector = dvmRegisterMapGetLine(pMap, addr);
1818            }
1819            if (regVector == NULL) {
1820                /*
1821                 * No register info for this frame; conservatively pin anything that looks like a valid, object-aligned reference.
1822                 */
1823                for (int i = 0; i < method->registersSize; ++i) {
1824                    u4 regValue = framePtr[i];
1825                    if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) {
1826                        pinObject((Object *)regValue);
1827                    }
1828                }
1829            }
1830        }
1831        /*
1832         * Don't fall into an infinite loop if things get corrupted.
1833         */
1834        assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1835               saveArea->prevFrame == NULL);
1836    }
1837}
1838
1839static void pinThread(const Thread *thread)
1840{
1841    assert(thread != NULL);
1842    LOG_PIN("pinThread(thread=%p)", thread);
1843
1844    LOG_PIN("Pin native method arguments");
1845    pinThreadStack(thread);
1846
1847    LOG_PIN("Pin internalLocalRefTable");
1848    pinReferenceTable(&thread->internalLocalRefTable);
1849
1850    LOG_PIN("Pin jniLocalRefTable");
1851    pinReferenceTable(&thread->jniLocalRefTable);
1852
1853    /* Can the check be pushed into the promote routine? */
1854    if (thread->jniMonitorRefTable.table) {
1855        LOG_PIN("Pin jniMonitorRefTable");
1856        pinReferenceTable(&thread->jniMonitorRefTable);
1857    }
1858}
1859
1860static void pinThreadList()
1861{
1862    Thread *thread;
1863
1864    dvmLockThreadList(dvmThreadSelf());
1865    thread = gDvm.threadList;
1866    while (thread) {
1867        pinThread(thread);
1868        thread = thread->next;
1869    }
1870    dvmUnlockThreadList();
1871}
1872
1873/*
1874 * Heap block scavenging.
1875 */
1876
1877/*
1878 * Scavenge objects in the current block.  Scavenging terminates when
1879 * the pointer reaches the highest address in the block or when a run
1880 * of zero words that continues to the highest address is reached.
1881 */
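/*
 * Illustration (editorial): a to-space block is a packed run of objects,
 * each aligned to ALLOC_ALIGNMENT, optionally followed by zero words up
 * to the block boundary:
 *
 *     | object | object | object | 0 0 0 ... 0 |
 *     ^ block start                            ^ block start + BLOCK_SIZE
 *
 * The first word of a live object is its class pointer and is never zero,
 * so a zero word marks the start of the trailing padding.
 */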
1882static void scavengeBlock(HeapSource *heapSource, size_t block)
1883{
1884    u1 *cursor;
1885    u1 *end;
1886    size_t size;
1887
1888    LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
1889
1890    assert(heapSource != NULL);
1891    assert(block < heapSource->totalBlocks);
1892    assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1893
1894    cursor = blockToAddress(heapSource, block);
1895    end = cursor + BLOCK_SIZE;
1896    LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);
1897
1898    /* Parse and scavenge the current block. */
1899    size = 0;
1900    while (cursor < end) {
1901        u4 word = *(u4 *)cursor;
1902        if (word != 0) {
1903            scavengeObject((Object *)cursor);
1904            size = objectSize((Object *)cursor);
1905            size = alignUp(size, ALLOC_ALIGNMENT);
1906            cursor += size;
1907        } else {
1908            /* Check for padding. */
1909            while (*(u4 *)cursor == 0) {
1910                cursor += 4;
1911                if (cursor == end) break;
1912            }
1913            /* Punt if something went wrong. */
1914            assert(cursor == end);
1915        }
1916    }
1917}
1918
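/*
 * Returns the exact size of an object, including the extra word appended
 * to an object whose identity hash code was taken before it was moved
 * (LW_HASH_STATE_HASHED_AND_MOVED).
 */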
1919static size_t objectSize(const Object *obj)
1920{
1921    size_t size;
1922
1923    assert(obj != NULL);
1924    assert(obj->clazz != NULL);
1925    if (obj->clazz == gDvm.classJavaLangClass) {
1926        size = dvmClassObjectSize((ClassObject *)obj);
1927    } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
1928        size = dvmArrayObjectSize((ArrayObject *)obj);
1929    } else {
1930        assert(obj->clazz->objectSize != 0);
1931        size = obj->clazz->objectSize;
1932    }
1933    if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1934        size += sizeof(u4);
1935    }
1936    return size;
1937}
1938
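/*
 * Walks a to-space block exactly as scavengeBlock() does, but verifies
 * each object with dvmVerifyObject() instead of scavenging it.
 */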
1939static void verifyBlock(HeapSource *heapSource, size_t block)
1940{
1941    u1 *cursor;
1942    u1 *end;
1943    size_t size;
1944
1945    // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
1946
1947    assert(heapSource != NULL);
1948    assert(block < heapSource->totalBlocks);
1949    assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1950
1951    cursor = blockToAddress(heapSource, block);
1952    end = cursor + BLOCK_SIZE;
1953    // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);
1954
1955    /* Parse and verify the current block. */
1956    size = 0;
1957    while (cursor < end) {
1958        u4 word = *(u4 *)cursor;
1959        if (word != 0) {
1960            dvmVerifyObject((Object *)cursor);
1961            size = objectSize((Object *)cursor);
1962            size = alignUp(size, ALLOC_ALIGNMENT);
1963            cursor += size;
1964        } else {
1965            /* Check for padding. */
1966            while (*(u4 *)cursor == 0) {
1967                cursor += 4;
1968                if (cursor == end) break;
1969            }
1970            /* Punt if something went wrong. */
1971            assert(cursor == end);
1972        }
1973    }
1974}
1975
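/*
 * Logs the length and contents of the block queue.  The queue threads
 * blocks awaiting scavenging together: blockQueue[block] holds the index
 * of the next enqueued block, and the chain is terminated by QUEUE_TAIL.
 */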
1976static void describeBlockQueue(const HeapSource *heapSource)
1977{
1978    size_t block, count;
1979    char space;
1980
1981    block = heapSource->queueHead;
1982    count = 0;
1983    LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
1984    /* Count the number of blocks enqueued. */
1985    while (block != QUEUE_TAIL) {
1986        block = heapSource->blockQueue[block];
1987        ++count;
1988    }
1989    LOG_SCAV("blockQueue %zu elements, enqueued %zu",
1990                 count, heapSource->queueSize);
1991    block = heapSource->queueHead;
1992    while (block != QUEUE_TAIL) {
1993        space = heapSource->blockSpace[block];
1994        LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
1995        block = heapSource->blockQueue[block];
1996    }
1997
1998    LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
1999}
2000
2001/*
2002 * Scavenges every block on the block queue, blackening the promoted and copied objects they contain.
2003 */
2004static void scavengeBlockQueue()
2005{
2006    HeapSource *heapSource;
2007    size_t block;
2008
2009    LOG_SCAV(">>> scavengeBlockQueue()");
2010    heapSource = gDvm.gcHeap->heapSource;
2011    describeBlockQueue(heapSource);
2012    while (heapSource->queueHead != QUEUE_TAIL) {
2013        block = heapSource->queueHead;
2014        LOG_SCAV("Dequeueing block %zu", block);
2015        scavengeBlock(heapSource, block);
2016        heapSource->queueHead = heapSource->blockQueue[block];
2017        LOG_SCAV("New queue head is %zu", heapSource->queueHead);
2018    }
2019    LOG_SCAV("<<< scavengeBlockQueue()");
2020}
2021
2022/*
2023 * Scan the block list and verify all blocks that are marked as being
2024 * in new space.  This should be parameterized so we can invoke this
2025 * routine outside the context of a collection.
2026 */
2027static void verifyNewSpace()
2028{
2029    HeapSource *heapSource = gDvm.gcHeap->heapSource;
2030    size_t c0 = 0, c1 = 0, c2 = 0, c7 = 0;
2031    for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
2032        switch (heapSource->blockSpace[i]) {
2033        case BLOCK_FREE: ++c0; break;
2034        case BLOCK_TO_SPACE: ++c1; break;
2035        case BLOCK_FROM_SPACE: ++c2; break;
2036        case BLOCK_CONTINUED: ++c7; break;
2037        default: assert(!"reached");
2038        }
2039    }
2040    LOG_VER("Block Demographics: "
2041            "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2042            c0, c1, c2, c7);
2043    for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
2044        if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2045            continue;
2046        }
2047        verifyBlock(heapSource, i);
2048    }
2049}
2050
2051void describeHeap()
2052{
2053    HeapSource *heapSource = gDvm.gcHeap->heapSource;
2054    describeBlocks(heapSource);
2055}
2056
2057/*
2058 * The collection interface.  Collection has a few distinct phases.
2059 * The first is flipping (also known as condemning or whitening) the heap.  The
2060 * second is to promote all objects which are pointed to by pinned or
2061 * ambiguous references.  The third phase is tracing from the stacks,
2062 * registers and various globals.  Lastly, a verification of the heap
2063 * is performed.  The last phase should be optional.
2064 */
2065void dvmScavengeRoots()  /* Needs a new name badly */
2066{
2067    GcHeap *gcHeap;
2068
2069    {
2070        size_t alloc, unused, total;
2071
2072        room(&alloc, &unused, &total);
2073        LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
2074                     alloc, unused, total);
2075    }
2076
2077    gcHeap = gDvm.gcHeap;
2078    dvmHeapSourceFlip();
2079
2080    /*
2081     * Promote blocks with stationary objects.
2082     */
2083    pinThreadList();
2084    pinReferenceTable(&gDvm.jniGlobalRefTable);
2085    pinReferenceTable(&gDvm.jniPinRefTable);
2086    pinHashTableEntries(gDvm.loadedClasses);
2087    pinHashTableEntries(gDvm.dbgRegistry);
2088    pinPrimitiveClasses();
2089    pinInternedStrings();
2090
2091    // describeBlocks(gcHeap->heapSource);
2092
2093    /*
2094     * Create the first open new-space page right here.
2095     */
2096
2097    /* Reset allocation to an unallocated block. */
2098    gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2099    gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2100    /*
2101     * Hack: promote the empty block allocated above.  If the
2102     * promotions that occurred above did not actually gray any
2103     * objects, the block queue may be empty.  We must force a
2104     * promotion to be safe.
2105     */
2106    promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2107
2108    /*
2109     * Scavenge blocks and relocate movable objects.
2110     */
2111
2112    LOG_SCAV("Scavenging gDvm.threadList");
2113    scavengeThreadList();
2114
2115    LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
2116    scavengeLargeHeapRefTable(gcHeap->referenceOperations);
2117
2118    LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
2119    scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);
2120
2121    LOG_SCAV("Scavenging random global stuff");
2122    scavengeReference(&gDvm.outOfMemoryObj);
2123    scavengeReference(&gDvm.internalErrorObj);
2124    scavengeReference(&gDvm.noClassDefFoundErrorObj);
2125
2126    // LOG_SCAV("Scavenging gDvm.internedString");
2127    scavengeInternedStrings();
2128
2129    LOG_SCAV("Root scavenge has completed.");
2130
2131    scavengeBlockQueue();
2132
2133    // LOG_SCAV("Re-snap global class pointers.");
2134    // scavengeGlobals();
2135
2136    LOG_SCAV("New space scavenge has completed.");
2137
2138    /*
2139     * Process reference objects in strength order.
2140     */
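    /*
     * Editorial note: the order below follows java.lang.ref semantics --
     * soft references are preserved or cleared first, weak references are
     * cleared next, finalizable objects are discovered, the soft and weak
     * lists are cleared again for objects made reachable by finalizers,
     * and phantom references are processed last.
     */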
2141
2142    LOG_REF("Processing soft references...");
2143    preserveSoftReferences(&gDvm.gcHeap->softReferences);
2144    clearWhiteReferences(&gDvm.gcHeap->softReferences);
2145
2146    LOG_REF("Processing weak references...");
2147    clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2148
2149    LOG_REF("Finding finalizations...");
2150    processFinalizableReferences();
2151
2152    LOG_REF("Processing f-reachable soft references...");
2153    clearWhiteReferences(&gDvm.gcHeap->softReferences);
2154
2155    LOG_REF("Processing f-reachable weak references...");
2156    clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2157
2158    LOG_REF("Processing phantom references...");
2159    clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2160
2161    /*
2162     * Verify the stack and heap.
2163     */
2164    dvmVerifyRoots();
2165    verifyNewSpace();
2166
2167    //describeBlocks(gcHeap->heapSource);
2168
2169    clearFromSpace(gcHeap->heapSource);
2170
2171    {
2172        size_t alloc, rem, total;
2173
2174        room(&alloc, &rem, &total);
2175        LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
2176    }
2177}
2178
2179/*
2180 * Interface compatibility routines.
2181 */
2182
2183void dvmClearWhiteRefs(Object **list)
2184{
2185    /* do nothing */
2186    assert(*list == NULL);
2187}
2188
2189void dvmHandleSoftRefs(Object **list)
2190{
2191    /* do nothing */
2192    assert(*list == NULL);
2193}
2194
2195bool dvmHeapBeginMarkStep(GcMode mode)
2196{
2197    /* do nothing */
2198    return true;
2199}
2200
2201void dvmHeapFinishMarkStep()
2202{
2203    /* do nothing */
2204}
2205
2206void dvmHeapMarkRootSet()
2207{
2208    /* do nothing */
2209}
2210
2211void dvmHeapScanMarkedObjects()
2212{
2213    dvmScavengeRoots();
2214}
2215
2216void dvmHeapScheduleFinalizations()
2217{
2218    /* do nothing */
2219}
2220
2221void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2222{
2223    *numFreed = 0;
2224    *sizeFreed = 0;
2225    /* do nothing */
2226}
2227
2228void dvmMarkDirtyObjects()
2229{
2230    assert(!"dvmMarkDirtyObjects is not implemented");
2231}
2232
2233void dvmHeapSourceThreadShutdown()
2234{
2235    /* do nothing */
2236}
2237