Copying.cpp revision 60fc806b679a3655c228b4093058c59941a49cfe
1/* 2 * Copyright (C) 2009 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <errno.h> 18#include <limits.h> 19#include <sys/mman.h> 20 21#include "Dalvik.h" 22#include "alloc/Heap.h" 23#include "alloc/HeapBitmap.h" 24#include "alloc/HeapInternal.h" 25#include "alloc/HeapSource.h" 26#include "alloc/Verify.h" 27 28/* 29 * A "mostly copying", generational, garbage collector. 30 * 31 * TODO: we allocate our own contiguous tract of page frames to back 32 * object allocations. To cooperate with other heaps active in the 33 * virtual machine we need to move the responsibility of allocating 34 * pages someplace outside of this code. 35 * 36 * The other major data structures that maintain the state of the heap 37 * are the block space table and the block queue. 38 * 39 * The block space table records the state of a block. We must track 40 * whether a block is: 41 * 42 * - Free or allocated in some space. 43 * 44 * - If the block holds part of a large object allocation, whether the 45 * block is the initial or a continued block of the allocation. 46 * 47 * - Whether the block is pinned, that is to say whether at least one 48 * object in the block must remain stationary. Only needed during a 49 * GC. 50 * 51 * - Which space the object belongs to. At present this means 52 * from-space or to-space. 53 * 54 * The block queue is used during garbage collection. Unlike Cheney's 55 * algorithm, from-space and to-space are not contiguous. Therefore, 56 * one cannot maintain the state of the copy with just two pointers. 57 * The block queue exists to thread lists of blocks from the various 58 * spaces together. 59 * 60 * Additionally, we record the free space frontier of the heap, as 61 * well as the address of the first object within a block, which is 62 * required to copy objects following a large object (not currently 63 * implemented). This is stored in the heap source structure. This 64 * should be moved elsewhere to support in-line allocations from Java 65 * threads. 66 * 67 * Allocation requests are satisfied by reserving storage from one or 68 * more contiguous blocks. Objects that are small enough to fit 69 * inside a block are packed together within a block. Objects that 70 * are larger than a block are allocated from contiguous sequences of 71 * blocks. When half the available blocks are filled, a garbage 72 * collection occurs. We "flip" spaces (exchange from- and to-space), 73 * copy live objects into to space, and perform pointer adjustment. 74 * 75 * Copying is made more complicated by the requirement that some 76 * objects must not be moved. This property is known as "pinning". 77 * These objects must be dealt with specially. We use Bartlett's 78 * scheme; blocks containing such objects are grayed (promoted) at the 79 * start of a garbage collection. By virtue of this trick, tracing 80 * from the roots proceeds as usual but all objects on those pages are 81 * considered promoted and therefore not moved. 82 * 83 * TODO: there is sufficient information within the garbage collector 84 * to implement Attardi's scheme for evacuating unpinned objects from 85 * a page that is otherwise pinned. This would eliminate false 86 * retention caused by the large pinning granularity. 87 * 88 * We need a scheme for medium and large objects. Ignore that for 89 * now, we can return to this later. 90 * 91 * Eventually we need to worry about promoting objects out of the 92 * copy-collected heap (tenuring) into a less volatile space. Copying 93 * may not always be the best policy for such spaces. We should 94 * consider a variant of mark, sweep, compact. 95 * 96 * The block scheme allows us to use VM page faults to maintain a 97 * write barrier. Consider having a special leaf state for a page. 98 * 99 * Bibliography: 100 * 101 * C. J. Cheney. 1970. A non-recursive list compacting 102 * algorithm. CACM. 13-11 pp677--678. 103 * 104 * Joel F. Bartlett. 1988. Compacting Garbage Collection with 105 * Ambiguous Roots. Digital Equipment Corporation. 106 * 107 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up 108 * Generations and C++. Digital Equipment Corporation. 109 * 110 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage 111 * Collection in Uncooperative Environments. Digital Equipment 112 * Corporation. 113 * 114 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory 115 * Management Framework. TR-94-010 116 * 117 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable 118 * memory management framework for C++. Software -- Practice and 119 * Experience. 28(11), 1143-1183. 120 * 121 */ 122 123#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0])) 124 125#if 0 126#define LOG_ALLOC LOGI 127#define LOG_PIN LOGI 128#define LOG_PROM LOGI 129#define LOG_REF LOGI 130#define LOG_SCAV LOGI 131#define LOG_TRAN LOGI 132#define LOG_VER LOGI 133#else 134#define LOG_ALLOC(...) ((void)0) 135#define LOG_PIN(...) ((void)0) 136#define LOG_PROM(...) ((void)0) 137#define LOG_REF(...) ((void)0) 138#define LOG_SCAV(...) ((void)0) 139#define LOG_TRAN(...) ((void)0) 140#define LOG_VER(...) ((void)0) 141#endif 142 143static void enqueueBlock(HeapSource *heapSource, size_t block); 144static void scavengeReference(Object **obj); 145static bool toSpaceContains(const void *addr); 146static bool fromSpaceContains(const void *addr); 147static size_t sumHeapBitmap(const HeapBitmap *bitmap); 148static size_t objectSize(const Object *obj); 149static void scavengeDataObject(Object *obj); 150static void scavengeBlockQueue(); 151 152/* 153 * We use 512-byte blocks. 154 */ 155enum { BLOCK_SHIFT = 9 }; 156enum { BLOCK_SIZE = 1 << BLOCK_SHIFT }; 157 158/* 159 * Space identifiers, stored into the blockSpace array. 160 */ 161enum { 162 BLOCK_FREE = 0, 163 BLOCK_FROM_SPACE = 1, 164 BLOCK_TO_SPACE = 2, 165 BLOCK_CONTINUED = 7 166}; 167 168/* 169 * Alignment for all allocations, in bytes. 170 */ 171enum { ALLOC_ALIGNMENT = 8 }; 172 173/* 174 * Sentinel value for the queue end. 175 */ 176#define QUEUE_TAIL (~(size_t)0) 177 178struct HeapSource { 179 180 /* The base address of backing store. */ 181 u1 *blockBase; 182 183 /* Total number of blocks available for allocation. */ 184 size_t totalBlocks; 185 size_t allocBlocks; 186 187 /* 188 * The scavenger work queue. Implemented as an array of index 189 * values into the queue. 190 */ 191 size_t *blockQueue; 192 193 /* 194 * Base and limit blocks. Basically the shifted start address of 195 * the block. We convert blocks to a relative number when 196 * indexing in the block queue. TODO: make the block queue base 197 * relative rather than the index into the block queue. 198 */ 199 size_t baseBlock, limitBlock; 200 201 size_t queueHead; 202 size_t queueTail; 203 size_t queueSize; 204 205 /* The space of the current block 0 (free), 1 or 2. */ 206 char *blockSpace; 207 208 /* Start of free space in the current block. */ 209 u1 *allocPtr; 210 /* Exclusive limit of free space in the current block. */ 211 u1 *allocLimit; 212 213 HeapBitmap allocBits; 214 215 /* 216 * The starting size of the heap. This value is the same as the 217 * value provided to the -Xms flag. 218 */ 219 size_t minimumSize; 220 221 /* 222 * The maximum size of the heap. This value is the same as the 223 * -Xmx flag. 224 */ 225 size_t maximumSize; 226 227 /* 228 * The current, committed size of the heap. At present, this is 229 * equivalent to the maximumSize. 230 */ 231 size_t currentSize; 232 233 size_t bytesAllocated; 234}; 235 236static unsigned long alignDown(unsigned long x, unsigned long n) 237{ 238 return x & -n; 239} 240 241static unsigned long alignUp(unsigned long x, unsigned long n) 242{ 243 return alignDown(x + (n - 1), n); 244} 245 246static void describeBlocks(const HeapSource *heapSource) 247{ 248 for (size_t i = 0; i < heapSource->totalBlocks; ++i) { 249 if ((i % 32) == 0) putchar('\n'); 250 printf("%d ", heapSource->blockSpace[i]); 251 } 252 putchar('\n'); 253} 254 255/* 256 * Virtual memory interface. 257 */ 258 259static void *virtualAlloc(size_t length) 260{ 261 int flags = MAP_PRIVATE | MAP_ANONYMOUS; 262 int prot = PROT_READ | PROT_WRITE; 263 void *addr = mmap(NULL, length, prot, flags, -1, 0); 264 if (addr == MAP_FAILED) { 265 LOGE_HEAP("mmap: %s", strerror(errno)); 266 addr = NULL; 267 } 268 return addr; 269} 270 271static void virtualFree(void *addr, size_t length) 272{ 273 assert(addr != NULL); 274 assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0); 275 int res = munmap(addr, length); 276 if (res == -1) { 277 LOGE_HEAP("munmap: %s", strerror(errno)); 278 } 279} 280 281#ifndef NDEBUG 282static int isValidAddress(const HeapSource *heapSource, const u1 *addr) 283{ 284 size_t block; 285 286 block = (uintptr_t)addr >> BLOCK_SHIFT; 287 return heapSource->baseBlock <= block && 288 heapSource->limitBlock > block; 289} 290#endif 291 292/* 293 * Iterate over the block map looking for a contiguous run of free 294 * blocks. 295 */ 296static void *allocateBlocks(HeapSource *heapSource, size_t blocks) 297{ 298 size_t allocBlocks = heapSource->allocBlocks; 299 size_t totalBlocks = heapSource->totalBlocks; 300 /* Check underflow. */ 301 assert(blocks != 0); 302 /* Check overflow. */ 303 if (allocBlocks + blocks > totalBlocks / 2) { 304 return NULL; 305 } 306 /* Scan block map. */ 307 for (size_t i = 0; i < totalBlocks; ++i) { 308 /* Check fit. */ 309 for (size_t j = 0; j < blocks; ++j) { /* runs over totalBlocks */ 310 if (heapSource->blockSpace[i+j] != BLOCK_FREE) { 311 break; 312 } 313 } 314 /* No fit? */ 315 if (j != blocks) { 316 i += j; 317 continue; 318 } 319 /* Fit, allocate. */ 320 heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */ 321 for (size_t j = 1; j < blocks; ++j) { 322 heapSource->blockSpace[i+j] = BLOCK_CONTINUED; 323 } 324 heapSource->allocBlocks += blocks; 325 void *addr = &heapSource->blockBase[i*BLOCK_SIZE]; 326 memset(addr, 0, blocks*BLOCK_SIZE); 327 /* Collecting? */ 328 if (heapSource->queueHead != QUEUE_TAIL) { 329 LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i); 330 /* 331 * This allocated was on behalf of the transporter when it 332 * shaded a white object gray. We enqueue the block so 333 * the scavenger can further shade the gray objects black. 334 */ 335 enqueueBlock(heapSource, i); 336 } 337 338 return addr; 339 } 340 /* Insufficient space, fail. */ 341 LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated", 342 heapSource->totalBlocks, 343 heapSource->allocBlocks, 344 heapSource->bytesAllocated); 345 return NULL; 346} 347 348/* Converts an absolute address to a relative block number. */ 349static size_t addressToBlock(const HeapSource *heapSource, const void *addr) 350{ 351 assert(heapSource != NULL); 352 assert(isValidAddress(heapSource, addr)); 353 return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock; 354} 355 356/* Converts a relative block number to an absolute address. */ 357static u1 *blockToAddress(const HeapSource *heapSource, size_t block) 358{ 359 u1 *addr; 360 361 addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE); 362 assert(isValidAddress(heapSource, addr)); 363 return addr; 364} 365 366static void clearBlock(HeapSource *heapSource, size_t block) 367{ 368 assert(heapSource != NULL); 369 assert(block < heapSource->totalBlocks); 370 u1 *addr = heapSource->blockBase + block*BLOCK_SIZE; 371 memset(addr, 0xCC, BLOCK_SIZE); 372 for (size_t i = 0; i < BLOCK_SIZE; i += 8) { 373 dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i); 374 } 375} 376 377static void clearFromSpace(HeapSource *heapSource) 378{ 379 assert(heapSource != NULL); 380 size_t i = 0; 381 size_t count = 0; 382 while (i < heapSource->totalBlocks) { 383 if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) { 384 ++i; 385 continue; 386 } 387 heapSource->blockSpace[i] = BLOCK_FREE; 388 clearBlock(heapSource, i); 389 ++i; 390 ++count; 391 while (i < heapSource->totalBlocks && 392 heapSource->blockSpace[i] == BLOCK_CONTINUED) { 393 heapSource->blockSpace[i] = BLOCK_FREE; 394 clearBlock(heapSource, i); 395 ++i; 396 ++count; 397 } 398 } 399 LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE); 400} 401 402/* 403 * Appends the given block to the block queue. The block queue is 404 * processed in-order by the scavenger. 405 */ 406static void enqueueBlock(HeapSource *heapSource, size_t block) 407{ 408 assert(heapSource != NULL); 409 assert(block < heapSource->totalBlocks); 410 if (heapSource->queueHead != QUEUE_TAIL) { 411 heapSource->blockQueue[heapSource->queueTail] = block; 412 } else { 413 heapSource->queueHead = block; 414 } 415 heapSource->blockQueue[block] = QUEUE_TAIL; 416 heapSource->queueTail = block; 417 ++heapSource->queueSize; 418} 419 420/* 421 * Grays all objects within the block corresponding to the given 422 * address. 423 */ 424static void promoteBlockByAddr(HeapSource *heapSource, const void *addr) 425{ 426 size_t block; 427 428 block = addressToBlock(heapSource, (const u1 *)addr); 429 if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) { 430 // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj); 431 heapSource->blockSpace[block] = BLOCK_TO_SPACE; 432 enqueueBlock(heapSource, block); 433 /* TODO(cshapiro): count continued blocks?*/ 434 heapSource->allocBlocks += 1; 435 } else { 436 // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj); 437 } 438} 439 440GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize) 441{ 442 GcHeap* gcHeap; 443 HeapSource *heapSource; 444 445 assert(startSize <= absoluteMaxSize); 446 447 heapSource = malloc(sizeof(*heapSource)); 448 assert(heapSource != NULL); 449 memset(heapSource, 0, sizeof(*heapSource)); 450 451 heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE); 452 heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE); 453 454 heapSource->currentSize = heapSource->maximumSize; 455 456 /* Allocate underlying storage for blocks. */ 457 heapSource->blockBase = virtualAlloc(heapSource->maximumSize); 458 assert(heapSource->blockBase != NULL); 459 heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT; 460 heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT; 461 462 heapSource->allocBlocks = 0; 463 heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock); 464 465 assert(heapSource->totalBlocks = heapSource->maximumSize / BLOCK_SIZE); 466 467 { 468 size_t size = sizeof(heapSource->blockQueue[0]); 469 heapSource->blockQueue = malloc(heapSource->totalBlocks*size); 470 assert(heapSource->blockQueue != NULL); 471 memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size); 472 heapSource->queueHead = QUEUE_TAIL; 473 } 474 475 /* Byte indicating space residence or free status of block. */ 476 { 477 size_t size = sizeof(heapSource->blockSpace[0]); 478 heapSource->blockSpace = malloc(heapSource->totalBlocks*size); 479 assert(heapSource->blockSpace != NULL); 480 memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size); 481 } 482 483 dvmHeapBitmapInit(&heapSource->allocBits, 484 heapSource->blockBase, 485 heapSource->maximumSize, 486 "blockBase"); 487 488 /* Initialize allocation pointers. */ 489 heapSource->allocPtr = allocateBlocks(heapSource, 1); 490 heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE; 491 492 gcHeap = malloc(sizeof(*gcHeap)); 493 assert(gcHeap != NULL); 494 memset(gcHeap, 0, sizeof(*gcHeap)); 495 gcHeap->heapSource = heapSource; 496 497 return gcHeap; 498} 499 500/* 501 * Perform any required heap initializations after forking from the 502 * zygote process. This is a no-op for the time being. Eventually 503 * this will demarcate the shared region of the heap. 504 */ 505bool dvmHeapSourceStartupAfterZygote() 506{ 507 return true; 508} 509 510bool dvmHeapSourceStartupBeforeFork() 511{ 512 assert(!"implemented"); 513 return false; 514} 515 516void dvmHeapSourceShutdown(GcHeap **gcHeap) 517{ 518 if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL) 519 return; 520 free((*gcHeap)->heapSource->blockQueue); 521 free((*gcHeap)->heapSource->blockSpace); 522 virtualFree((*gcHeap)->heapSource->blockBase, 523 (*gcHeap)->heapSource->maximumSize); 524 free((*gcHeap)->heapSource); 525 (*gcHeap)->heapSource = NULL; 526 free(*gcHeap); 527 *gcHeap = NULL; 528} 529 530size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, 531 size_t perHeapStats[], 532 size_t arrayLen) 533{ 534 HeapSource *heapSource; 535 size_t value; 536 537 heapSource = gDvm.gcHeap->heapSource; 538 switch (spec) { 539 case HS_FOOTPRINT: 540 value = heapSource->maximumSize; 541 break; 542 case HS_ALLOWED_FOOTPRINT: 543 value = heapSource->maximumSize; 544 break; 545 case HS_BYTES_ALLOCATED: 546 value = heapSource->bytesAllocated; 547 break; 548 case HS_OBJECTS_ALLOCATED: 549 value = sumHeapBitmap(&heapSource->allocBits); 550 break; 551 default: 552 assert(!"implemented"); 553 value = 0; 554 } 555 if (perHeapStats) { 556 *perHeapStats = value; 557 } 558 return value; 559} 560 561/* 562 * Performs a shallow copy of the allocation bitmap into the given 563 * vector of heap bitmaps. 564 */ 565void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[], 566 size_t numHeaps) 567{ 568 assert(!"implemented"); 569} 570 571HeapBitmap *dvmHeapSourceGetLiveBits() 572{ 573 return &gDvm.gcHeap->heapSource->allocBits; 574} 575 576/* 577 * Allocate the specified number of bytes from the heap. The 578 * allocation cursor points into a block of free storage. If the 579 * given allocation fits in the remaining space of the block, we 580 * advance the cursor and return a pointer to the free storage. If 581 * the allocation cannot fit in the current block but is smaller than 582 * a block we request a new block and allocate from it instead. If 583 * the allocation is larger than a block we must allocate from a span 584 * of contiguous blocks. 585 */ 586void *dvmHeapSourceAlloc(size_t length) 587{ 588 HeapSource *heapSource; 589 unsigned char *addr; 590 size_t aligned, available, blocks; 591 592 heapSource = gDvm.gcHeap->heapSource; 593 assert(heapSource != NULL); 594 assert(heapSource->allocPtr != NULL); 595 assert(heapSource->allocLimit != NULL); 596 597 aligned = alignUp(length, ALLOC_ALIGNMENT); 598 available = heapSource->allocLimit - heapSource->allocPtr; 599 600 /* Try allocating inside the current block. */ 601 if (aligned <= available) { 602 addr = heapSource->allocPtr; 603 heapSource->allocPtr += aligned; 604 heapSource->bytesAllocated += aligned; 605 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr); 606 return addr; 607 } 608 609 /* Try allocating in a new block. */ 610 if (aligned <= BLOCK_SIZE) { 611 addr = allocateBlocks(heapSource, 1); 612 if (addr != NULL) { 613 heapSource->allocLimit = addr + BLOCK_SIZE; 614 heapSource->allocPtr = addr + aligned; 615 heapSource->bytesAllocated += aligned; 616 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr); 617 /* TODO(cshapiro): pad out the current block. */ 618 } 619 return addr; 620 } 621 622 /* Try allocating in a span of blocks. */ 623 blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE; 624 625 addr = allocateBlocks(heapSource, blocks); 626 /* Propagate failure upward. */ 627 if (addr != NULL) { 628 heapSource->bytesAllocated += aligned; 629 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr); 630 /* TODO(cshapiro): pad out free space in the last block. */ 631 } 632 return addr; 633} 634 635void *dvmHeapSourceAllocAndGrow(size_t size) 636{ 637 return dvmHeapSourceAlloc(size); 638} 639 640/* TODO: refactor along with dvmHeapSourceAlloc */ 641void *allocateGray(size_t size) 642{ 643 HeapSource *heapSource; 644 void *addr; 645 size_t block; 646 647 /* TODO: add a check that we are in a GC. */ 648 heapSource = gDvm.gcHeap->heapSource; 649 addr = dvmHeapSourceAlloc(size); 650 assert(addr != NULL); 651 block = addressToBlock(heapSource, (const u1 *)addr); 652 if (heapSource->queueHead == QUEUE_TAIL) { 653 /* 654 * Forcibly append the underlying block to the queue. This 655 * condition occurs when referents are transported following 656 * the initial trace. 657 */ 658 enqueueBlock(heapSource, block); 659 LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr); 660 } 661 return addr; 662} 663 664bool dvmHeapSourceContainsAddress(const void *ptr) 665{ 666 HeapSource *heapSource = gDvm.gcHeap->heapSource; 667 return dvmHeapBitmapCoversAddress(&heapSource->allocBits, ptr); 668} 669 670/* 671 * Returns true if the given address is within the heap and points to 672 * the header of a live object. 673 */ 674bool dvmHeapSourceContains(const void *addr) 675{ 676 HeapSource *heapSource; 677 HeapBitmap *bitmap; 678 679 heapSource = gDvm.gcHeap->heapSource; 680 bitmap = &heapSource->allocBits; 681 if (!dvmHeapBitmapCoversAddress(bitmap, addr)) { 682 return false; 683 } else { 684 return dvmHeapBitmapIsObjectBitSet(bitmap, addr); 685 } 686} 687 688bool dvmHeapSourceGetPtrFlag(const void *ptr, HeapSourcePtrFlag flag) 689{ 690 assert(!"implemented"); 691 return false; 692} 693 694size_t dvmHeapSourceChunkSize(const void *ptr) 695{ 696 assert(!"implemented"); 697 return 0; 698} 699 700size_t dvmHeapSourceFootprint() 701{ 702 assert(!"implemented"); 703 return 0; 704} 705 706/* 707 * Returns the "ideal footprint" which appears to be the number of 708 * bytes currently committed to the heap. This starts out at the 709 * start size of the heap and grows toward the maximum size. 710 */ 711size_t dvmHeapSourceGetIdealFootprint() 712{ 713 return gDvm.gcHeap->heapSource->currentSize; 714} 715 716float dvmGetTargetHeapUtilization() 717{ 718 return 0.5f; 719} 720 721void dvmSetTargetHeapUtilization(float newTarget) 722{ 723 assert(newTarget > 0.0f && newTarget < 1.0f); 724} 725 726/* 727 * Expands the size of the heap after a collection. At present we 728 * commit the pages for maximum size of the heap so this routine is 729 * just a no-op. Eventually, we will either allocate or commit pages 730 * on an as-need basis. 731 */ 732void dvmHeapSourceGrowForUtilization() 733{ 734 /* do nothing */ 735} 736 737void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen) 738{ 739 /* do nothing */ 740} 741 742void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen, 743 const void *userptr, size_t userlen, 744 void *arg), 745 void *arg) 746{ 747 assert(!"implemented"); 748} 749 750size_t dvmHeapSourceGetNumHeaps() 751{ 752 return 1; 753} 754 755bool dvmTrackExternalAllocation(size_t n) 756{ 757 /* do nothing */ 758 return true; 759} 760 761void dvmTrackExternalFree(size_t n) 762{ 763 /* do nothing */ 764} 765 766size_t dvmGetExternalBytesAllocated() 767{ 768 assert(!"implemented"); 769 return 0; 770} 771 772void dvmHeapSourceFlip() 773{ 774 HeapSource *heapSource = gDvm.gcHeap->heapSource; 775 776 /* Reset the block queue. */ 777 heapSource->allocBlocks = 0; 778 heapSource->queueSize = 0; 779 heapSource->queueHead = QUEUE_TAIL; 780 781 /* TODO(cshapiro): pad the current (prev) block. */ 782 783 heapSource->allocPtr = NULL; 784 heapSource->allocLimit = NULL; 785 786 /* Whiten all allocated blocks. */ 787 for (size_t i = 0; i < heapSource->totalBlocks; ++i) { 788 if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) { 789 heapSource->blockSpace[i] = BLOCK_FROM_SPACE; 790 } 791 } 792} 793 794static void room(size_t *alloc, size_t *avail, size_t *total) 795{ 796 HeapSource *heapSource = gDvm.gcHeap->heapSource; 797 *total = heapSource->totalBlocks*BLOCK_SIZE; 798 *alloc = heapSource->allocBlocks*BLOCK_SIZE; 799 *avail = *total - *alloc; 800} 801 802static bool isSpaceInternal(u1 *addr, int space) 803{ 804 HeapSource *heapSource; 805 u1 *base, *limit; 806 size_t offset; 807 char space2; 808 809 heapSource = gDvm.gcHeap->heapSource; 810 base = heapSource->blockBase; 811 assert(addr >= base); 812 limit = heapSource->blockBase + heapSource->maximumSize; 813 assert(addr < limit); 814 offset = addr - base; 815 space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT]; 816 return space == space2; 817} 818 819static bool fromSpaceContains(const void *addr) 820{ 821 return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE); 822} 823 824static bool toSpaceContains(const void *addr) 825{ 826 return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE); 827} 828 829/* 830 * Notifies the collector that the object at the given address must 831 * remain stationary during the current collection. 832 */ 833static void pinObject(const Object *obj) 834{ 835 promoteBlockByAddr(gDvm.gcHeap->heapSource, obj); 836} 837 838static size_t sumHeapBitmap(const HeapBitmap *bitmap) 839{ 840 size_t sum = 0; 841 for (size_t i = 0; i < bitmap->bitsLen >> 2; ++i) { 842 sum += CLZ(bitmap->bits[i]); 843 } 844 return sum; 845} 846 847/* 848 * Miscellaneous functionality. 849 */ 850 851static int isForward(const void *addr) 852{ 853 return (uintptr_t)addr & 0x1; 854} 855 856static void setForward(const void *toObj, void *fromObj) 857{ 858 *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1; 859} 860 861static void* getForward(const void *fromObj) 862{ 863 return (void *)((uintptr_t)fromObj & ~0x1); 864} 865 866/* Beware, uses the same encoding as a forwarding pointers! */ 867static int isPermanentString(const StringObject *obj) { 868 return (uintptr_t)obj & 0x1; 869} 870 871static void* getPermanentString(const StringObject *obj) 872{ 873 return (void *)((uintptr_t)obj & ~0x1); 874} 875 876 877/* 878 * Scavenging and transporting routines follow. A transporter grays 879 * an object. A scavenger blackens an object. We define these 880 * routines for each fundamental object type. Dispatch is performed 881 * in scavengeObject. 882 */ 883 884/* 885 * Class object scavenging. 886 */ 887static void scavengeClassObject(ClassObject *obj) 888{ 889 LOG_SCAV("scavengeClassObject(obj=%p)", obj); 890 assert(obj != NULL); 891 assert(obj->obj.clazz != NULL); 892 assert(obj->obj.clazz->descriptor != NULL); 893 assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;")); 894 assert(obj->descriptor != NULL); 895 LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu", 896 obj->descriptor, obj->vtableCount); 897 /* Delegate class object and instance field scavenging. */ 898 scavengeDataObject((Object *)obj); 899 /* Scavenge the array element class object. */ 900 if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) { 901 scavengeReference((Object **)(void *)&obj->elementClass); 902 } 903 /* Scavenge the superclass. */ 904 scavengeReference((Object **)(void *)&obj->super); 905 /* Scavenge the class loader. */ 906 scavengeReference(&obj->classLoader); 907 /* Scavenge static fields. */ 908 for (int i = 0; i < obj->sfieldCount; ++i) { 909 char ch = obj->sfields[i].field.signature[0]; 910 if (ch == '[' || ch == 'L') { 911 scavengeReference((Object **)(void *)&obj->sfields[i].value.l); 912 } 913 } 914 /* Scavenge interface class objects. */ 915 for (int i = 0; i < obj->interfaceCount; ++i) { 916 scavengeReference((Object **) &obj->interfaces[i]); 917 } 918} 919 920/* 921 * Array object scavenging. 922 */ 923static size_t scavengeArrayObject(ArrayObject *array) 924{ 925 LOG_SCAV("scavengeArrayObject(array=%p)", array); 926 /* Scavenge the class object. */ 927 assert(toSpaceContains(array)); 928 assert(array != NULL); 929 assert(array->obj.clazz != NULL); 930 scavengeReference((Object **) array); 931 size_t length = dvmArrayObjectSize(array); 932 /* Scavenge the array contents. */ 933 if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) { 934 Object **contents = (Object **)array->contents; 935 for (size_t i = 0; i < array->length; ++i) { 936 scavengeReference(&contents[i]); 937 } 938 } 939 return length; 940} 941 942/* 943 * Reference object scavenging. 944 */ 945 946static int getReferenceFlags(const Object *obj) 947{ 948 int flags; 949 950 flags = CLASS_ISREFERENCE | 951 CLASS_ISWEAKREFERENCE | 952 CLASS_ISPHANTOMREFERENCE; 953 return GET_CLASS_FLAG_GROUP(obj->clazz, flags); 954} 955 956static int isSoftReference(const Object *obj) 957{ 958 return getReferenceFlags(obj) == CLASS_ISREFERENCE; 959} 960 961static int isWeakReference(const Object *obj) 962{ 963 return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE; 964} 965 966#ifndef NDEBUG 967static bool isPhantomReference(const Object *obj) 968{ 969 return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE; 970} 971#endif 972 973/* 974 * Returns true if the reference was registered with a reference queue 975 * but has not yet been appended to it. 976 */ 977static bool isReferenceEnqueuable(const Object *ref) 978{ 979 Object *queue, *queueNext; 980 981 queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue); 982 queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext); 983 if (queue == NULL || queueNext != NULL) { 984 /* 985 * There is no queue, or the reference has already 986 * been enqueued. The Reference.enqueue() method 987 * will do nothing even if we call it. 988 */ 989 return false; 990 } 991 992 /* 993 * We need to call enqueue(), but if we called it from 994 * here we'd probably deadlock. Schedule a call. 995 */ 996 return true; 997} 998 999/* 1000 * Schedules a reference to be appended to its reference queue. 1001 */ 1002static void enqueueReference(Object *ref) 1003{ 1004 assert(ref != NULL); 1005 assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL); 1006 assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL); 1007 if (!dvmHeapAddRefToLargeTable(&gDvm.gcHeap->referenceOperations, ref)) { 1008 LOGE("no room for any more reference operations"); 1009 dvmAbort(); 1010 } 1011} 1012 1013/* 1014 * Sets the referent field of a reference object to NULL. 1015 */ 1016static void clearReference(Object *obj) 1017{ 1018 dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL); 1019} 1020 1021/* 1022 * Clears reference objects with white referents. 1023 */ 1024void clearWhiteReferences(Object **list) 1025{ 1026 size_t referentOffset, queueNextOffset; 1027 bool doSignal; 1028 1029 queueNextOffset = gDvm.offJavaLangRefReference_queueNext; 1030 referentOffset = gDvm.offJavaLangRefReference_referent; 1031 doSignal = false; 1032 while (*list != NULL) { 1033 Object *ref = *list; 1034 JValue *field = dvmFieldPtr(ref, referentOffset); 1035 Object *referent = field->l; 1036 *list = dvmGetFieldObject(ref, queueNextOffset); 1037 dvmSetFieldObject(ref, queueNextOffset, NULL); 1038 assert(referent != NULL); 1039 if (isForward(referent->clazz)) { 1040 field->l = referent = getForward(referent->clazz); 1041 continue; 1042 } 1043 if (fromSpaceContains(referent)) { 1044 /* Referent is white, clear it. */ 1045 clearReference(ref); 1046 if (isReferenceEnqueuable(ref)) { 1047 enqueueReference(ref); 1048 doSignal = true; 1049 } 1050 } 1051 } 1052 /* 1053 * If we cleared a reference with a reference queue we must notify 1054 * the heap worker to append the reference. 1055 */ 1056 if (doSignal) { 1057 dvmSignalHeapWorker(false); 1058 } 1059 assert(*list == NULL); 1060} 1061 1062/* 1063 * Blackens referents subject to the soft reference preservation 1064 * policy. 1065 */ 1066void preserveSoftReferences(Object **list) 1067{ 1068 Object *ref; 1069 Object *prev, *next; 1070 size_t referentOffset, queueNextOffset; 1071 unsigned counter; 1072 bool white; 1073 1074 queueNextOffset = gDvm.offJavaLangRefReference_queueNext; 1075 referentOffset = gDvm.offJavaLangRefReference_referent; 1076 counter = 0; 1077 prev = next = NULL; 1078 ref = *list; 1079 while (ref != NULL) { 1080 JValue *field = dvmFieldPtr(ref, referentOffset); 1081 Object *referent = field->l; 1082 next = dvmGetFieldObject(ref, queueNextOffset); 1083 assert(referent != NULL); 1084 if (isForward(referent->clazz)) { 1085 /* Referent is black. */ 1086 field->l = referent = getForward(referent->clazz); 1087 white = false; 1088 } else { 1089 white = fromSpaceContains(referent); 1090 } 1091 if (!white && ((++counter) & 1)) { 1092 /* Referent is white and biased toward saving, gray it. */ 1093 scavengeReference((Object **)(void *)&field->l); 1094 white = true; 1095 } 1096 if (white) { 1097 /* Referent is black, unlink it. */ 1098 if (prev != NULL) { 1099 dvmSetFieldObject(ref, queueNextOffset, NULL); 1100 dvmSetFieldObject(prev, queueNextOffset, next); 1101 } 1102 } else { 1103 /* Referent is white, skip over it. */ 1104 prev = ref; 1105 } 1106 ref = next; 1107 } 1108 /* 1109 * Restart the trace with the newly gray references added to the 1110 * root set. 1111 */ 1112 scavengeBlockQueue(); 1113} 1114 1115void processFinalizableReferences() 1116{ 1117 HeapRefTable newPendingRefs; 1118 LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs; 1119 Object **ref; 1120 Object **lastRef; 1121 size_t totalPendCount; 1122 1123 /* 1124 * All strongly, reachable objects are black. 1125 * Any white finalizable objects need to be finalized. 1126 */ 1127 1128 /* Create a table that the new pending refs will 1129 * be added to. 1130 */ 1131 if (!dvmHeapInitHeapRefTable(&newPendingRefs)) { 1132 //TODO: mark all finalizable refs and hope that 1133 // we can schedule them next time. Watch out, 1134 // because we may be expecting to free up space 1135 // by calling finalizers. 1136 LOG_REF("no room for pending finalizations"); 1137 dvmAbort(); 1138 } 1139 1140 /* 1141 * Walk through finalizableRefs and move any white references to 1142 * the list of new pending refs. 1143 */ 1144 totalPendCount = 0; 1145 while (finRefs != NULL) { 1146 Object **gapRef; 1147 size_t newPendCount = 0; 1148 1149 gapRef = ref = finRefs->refs.table; 1150 lastRef = finRefs->refs.nextEntry; 1151 while (ref < lastRef) { 1152 if (fromSpaceContains(*ref)) { 1153 if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) { 1154 //TODO: add the current table and allocate 1155 // a new, smaller one. 1156 LOG_REF("no room for any more pending finalizations: %zd", 1157 dvmHeapNumHeapRefTableEntries(&newPendingRefs)); 1158 dvmAbort(); 1159 } 1160 newPendCount++; 1161 } else { 1162 /* This ref is black, so will remain on finalizableRefs. 1163 */ 1164 if (newPendCount > 0) { 1165 /* Copy it up to fill the holes. 1166 */ 1167 *gapRef++ = *ref; 1168 } else { 1169 /* No holes yet; don't bother copying. 1170 */ 1171 gapRef++; 1172 } 1173 } 1174 ref++; 1175 } 1176 finRefs->refs.nextEntry = gapRef; 1177 //TODO: if the table is empty when we're done, free it. 1178 totalPendCount += newPendCount; 1179 finRefs = finRefs->next; 1180 } 1181 LOG_REF("%zd finalizers triggered.", totalPendCount); 1182 if (totalPendCount == 0) { 1183 /* No objects required finalization. 1184 * Free the empty temporary table. 1185 */ 1186 dvmClearReferenceTable(&newPendingRefs); 1187 return; 1188 } 1189 1190 /* Add the new pending refs to the main list. 1191 */ 1192 if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs, 1193 &newPendingRefs)) 1194 { 1195 LOG_REF("can't insert new pending finalizations"); 1196 dvmAbort(); 1197 } 1198 1199 //TODO: try compacting the main list with a memcpy loop 1200 1201 /* Blacken the refs we just moved; we don't want them or their 1202 * children to get swept yet. 1203 */ 1204 ref = newPendingRefs.table; 1205 lastRef = newPendingRefs.nextEntry; 1206 assert(ref < lastRef); 1207 HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0); 1208 while (ref < lastRef) { 1209 scavengeReference(ref); 1210 ref++; 1211 } 1212 HPROF_CLEAR_GC_SCAN_STATE(); 1213 scavengeBlockQueue(); 1214 dvmSignalHeapWorker(false); 1215} 1216 1217/* 1218 * If a reference points to from-space and has been forwarded, we snap 1219 * the pointer to its new to-space address. If the reference points 1220 * to an unforwarded from-space address we must enqueue the reference 1221 * for later processing. TODO: implement proper reference processing 1222 * and move the referent scavenging elsewhere. 1223 */ 1224static void scavengeReferenceObject(Object *obj) 1225{ 1226 Object *referent; 1227 Object **queue; 1228 size_t referentOffset, queueNextOffset; 1229 1230 assert(obj != NULL); 1231 LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor); 1232 scavengeDataObject(obj); 1233 referentOffset = gDvm.offJavaLangRefReference_referent; 1234 referent = dvmGetFieldObject(obj, referentOffset); 1235 if (referent == NULL || toSpaceContains(referent)) { 1236 return; 1237 } 1238 if (isSoftReference(obj)) { 1239 queue = &gDvm.gcHeap->softReferences; 1240 } else if (isWeakReference(obj)) { 1241 queue = &gDvm.gcHeap->weakReferences; 1242 } else { 1243 assert(isPhantomReference(obj)); 1244 queue = &gDvm.gcHeap->phantomReferences; 1245 } 1246 queueNextOffset = gDvm.offJavaLangRefReference_queueNext; 1247 dvmSetFieldObject(obj, queueNextOffset, *queue); 1248 *queue = obj; 1249 LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj); 1250} 1251 1252/* 1253 * Data object scavenging. 1254 */ 1255static void scavengeDataObject(Object *obj) 1256{ 1257 // LOG_SCAV("scavengeDataObject(obj=%p)", obj); 1258 assert(obj != NULL); 1259 assert(obj->clazz != NULL); 1260 assert(obj->clazz->objectSize != 0); 1261 assert(toSpaceContains(obj)); 1262 /* Scavenge the class object. */ 1263 ClassObject *clazz = obj->clazz; 1264 scavengeReference((Object **) obj); 1265 /* Scavenge instance fields. */ 1266 if (clazz->refOffsets != CLASS_WALK_SUPER) { 1267 size_t refOffsets = clazz->refOffsets; 1268 while (refOffsets != 0) { 1269 size_t rshift = CLZ(refOffsets); 1270 size_t offset = CLASS_OFFSET_FROM_CLZ(rshift); 1271 Object **ref = (Object **)((u1 *)obj + offset); 1272 scavengeReference(ref); 1273 refOffsets &= ~(CLASS_HIGH_BIT >> rshift); 1274 } 1275 } else { 1276 for (; clazz != NULL; clazz = clazz->super) { 1277 InstField *field = clazz->ifields; 1278 for (int i = 0; i < clazz->ifieldRefCount; ++i, ++field) { 1279 size_t offset = field->byteOffset; 1280 Object **ref = (Object **)((u1 *)obj + offset); 1281 scavengeReference(ref); 1282 } 1283 } 1284 } 1285} 1286 1287static Object *transportObject(const Object *fromObj) 1288{ 1289 Object *toObj; 1290 size_t allocSize, copySize; 1291 1292 LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu", 1293 fromObj, 1294 gDvm.gcHeap->heapSource->allocBlocks); 1295 assert(fromObj != NULL); 1296 assert(fromSpaceContains(fromObj)); 1297 allocSize = copySize = objectSize(fromObj); 1298 if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) { 1299 /* 1300 * The object has been hashed or hashed and moved. We must 1301 * reserve an additional word for a hash code. 1302 */ 1303 allocSize += sizeof(u4); 1304 } 1305 if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) { 1306 /* 1307 * The object has its hash code allocated. Ensure the hash 1308 * code is copied along with the instance data. 1309 */ 1310 copySize += sizeof(u4); 1311 } 1312 /* TODO(cshapiro): don't copy, re-map large data objects. */ 1313 assert(copySize <= allocSize); 1314 toObj = allocateGray(allocSize); 1315 assert(toObj != NULL); 1316 assert(toSpaceContains(toObj)); 1317 memcpy(toObj, fromObj, copySize); 1318 if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) { 1319 /* 1320 * The object has had its hash code exposed. Append it to the 1321 * instance and set a bit so we know to look for it there. 1322 */ 1323 *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3; 1324 toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT; 1325 } 1326 LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s", 1327 fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj), 1328 toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj), 1329 copySize, allocSize, copySize < allocSize ? "DIFFERENT" : ""); 1330 return toObj; 1331} 1332 1333/* 1334 * Generic reference scavenging. 1335 */ 1336 1337/* 1338 * Given a reference to an object, the scavenge routine will gray the 1339 * reference. Any objects pointed to by the scavenger object will be 1340 * transported to new space and a forwarding pointer will be installed 1341 * in the header of the object. 1342 */ 1343 1344/* 1345 * Blacken the given pointer. If the pointer is in from space, it is 1346 * transported to new space. If the object has a forwarding pointer 1347 * installed it has already been transported and the referent is 1348 * snapped to the new address. 1349 */ 1350static void scavengeReference(Object **obj) 1351{ 1352 ClassObject *clazz; 1353 Object *fromObj, *toObj; 1354 1355 assert(obj); 1356 1357 if (*obj == NULL) return; 1358 1359 assert(dvmIsValidObject(*obj)); 1360 1361 /* The entire block is black. */ 1362 if (toSpaceContains(*obj)) { 1363 LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj); 1364 return; 1365 } 1366 LOG_SCAV("scavengeReference(*obj=%p)", *obj); 1367 1368 assert(fromSpaceContains(*obj)); 1369 1370 clazz = (*obj)->clazz; 1371 1372 if (isForward(clazz)) { 1373 // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1)); 1374 *obj = (Object *)getForward(clazz); 1375 return; 1376 } 1377 fromObj = *obj; 1378 if (clazz == NULL) { 1379 // LOG_SCAV("scavangeReference %p has a NULL class object", fromObj); 1380 assert(!"implemented"); 1381 toObj = NULL; 1382 } else { 1383 toObj = transportObject(fromObj); 1384 } 1385 setForward(toObj, fromObj); 1386 *obj = (Object *)toObj; 1387} 1388 1389/* 1390 * Generic object scavenging. 1391 */ 1392static void scavengeObject(Object *obj) 1393{ 1394 ClassObject *clazz; 1395 1396 assert(obj != NULL); 1397 assert(obj->clazz != NULL); 1398 assert(!((uintptr_t)obj->clazz & 0x1)); 1399 clazz = obj->clazz; 1400 if (dvmIsTheClassClass(clazz)) { 1401 scavengeClassObject((ClassObject *)obj); 1402 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) { 1403 scavengeArrayObject((ArrayObject *)obj); 1404 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) { 1405 scavengeReferenceObject(obj); 1406 } else { 1407 scavengeDataObject(obj); 1408 } 1409} 1410 1411/* 1412 * External root scavenging routines. 1413 */ 1414 1415static void pinHashTableEntries(HashTable *table) 1416{ 1417 LOG_PIN(">>> pinHashTableEntries(table=%p)", table); 1418 if (table == NULL) { 1419 return; 1420 } 1421 dvmHashTableLock(table); 1422 for (int i = 0; i < table->tableSize; ++i) { 1423 HashEntry *entry = &table->pEntries[i]; 1424 void *obj = entry->data; 1425 if (obj == NULL || obj == HASH_TOMBSTONE) { 1426 continue; 1427 } 1428 pinObject(entry->data); 1429 } 1430 dvmHashTableUnlock(table); 1431 LOG_PIN("<<< pinHashTableEntries(table=%p)", table); 1432} 1433 1434static void pinPrimitiveClasses() 1435{ 1436 size_t length = ARRAYSIZE(gDvm.primitiveClass); 1437 for (size_t i = 0; i < length; i++) { 1438 if (gDvm.primitiveClass[i] != NULL) { 1439 pinObject((Object *)gDvm.primitiveClass[i]); 1440 } 1441 } 1442} 1443 1444/* 1445 * Scavenge interned strings. Permanent interned strings will have 1446 * been pinned and are therefore ignored. Non-permanent strings that 1447 * have been forwarded are snapped. All other entries are removed. 1448 */ 1449static void scavengeInternedStrings() 1450{ 1451 HashTable *table = gDvm.internedStrings; 1452 if (table == NULL) { 1453 return; 1454 } 1455 dvmHashTableLock(table); 1456 for (int i = 0; i < table->tableSize; ++i) { 1457 HashEntry *entry = &table->pEntries[i]; 1458 Object *obj = (Object *)entry->data; 1459 if (obj == NULL || obj == HASH_TOMBSTONE) { 1460 continue; 1461 } else if (!isPermanentString((StringObject *)obj)) { 1462 // LOG_SCAV("entry->data=%p", entry->data); 1463 LOG_SCAV(">>> string obj=%p", entry->data); 1464 /* TODO(cshapiro): detach white string objects */ 1465 scavengeReference((Object **)(void *)&entry->data); 1466 LOG_SCAV("<<< string obj=%p", entry->data); 1467 } 1468 } 1469 dvmHashTableUnlock(table); 1470} 1471 1472static void pinInternedStrings() 1473{ 1474 HashTable *table = gDvm.internedStrings; 1475 if (table == NULL) { 1476 return; 1477 } 1478 dvmHashTableLock(table); 1479 for (int i = 0; i < table->tableSize; ++i) { 1480 HashEntry *entry = &table->pEntries[i]; 1481 Object *obj = (Object *)entry->data; 1482 if (obj == NULL || obj == HASH_TOMBSTONE) { 1483 continue; 1484 } else if (isPermanentString((StringObject *)obj)) { 1485 obj = (Object *)getPermanentString((StringObject*)obj); 1486 LOG_PROM(">>> pin string obj=%p", obj); 1487 pinObject(obj); 1488 LOG_PROM("<<< pin string obj=%p", obj); 1489 } 1490 } 1491 dvmHashTableUnlock(table); 1492} 1493 1494/* 1495 * At present, reference tables contain references that must not be 1496 * moved by the collector. Instead of scavenging each reference in 1497 * the table we pin each referenced object. 1498 */ 1499static void pinReferenceTable(const ReferenceTable *table) 1500{ 1501 assert(table != NULL); 1502 assert(table->table != NULL); 1503 assert(table->nextEntry != NULL); 1504 for (Object **entry = table->table; entry < table->nextEntry; ++entry) { 1505 assert(entry != NULL); 1506 assert(!isForward(*entry)); 1507 pinObject(*entry); 1508 } 1509} 1510 1511static void scavengeLargeHeapRefTable(LargeHeapRefTable *table) 1512{ 1513 for (; table != NULL; table = table->next) { 1514 Object **ref = table->refs.table; 1515 for (; ref < table->refs.nextEntry; ++ref) { 1516 scavengeReference(ref); 1517 } 1518 } 1519} 1520 1521/* This code was copied from Thread.c */ 1522static void scavengeThreadStack(Thread *thread) 1523{ 1524 const u4 *framePtr; 1525#if WITH_EXTRA_GC_CHECKS > 1 1526 bool first = true; 1527#endif 1528 1529 framePtr = (const u4 *)thread->interpSave.curFrame; 1530 while (framePtr != NULL) { 1531 const StackSaveArea *saveArea; 1532 const Method *method; 1533 1534 saveArea = SAVEAREA_FROM_FP(framePtr); 1535 method = saveArea->method; 1536 if (method != NULL && !dvmIsNativeMethod(method)) { 1537#ifdef COUNT_PRECISE_METHODS 1538 /* the GC is running, so no lock required */ 1539 if (dvmPointerSetAddEntry(gDvm.preciseMethods, method)) 1540 LOG_SCAV("PGC: added %s.%s %p", 1541 method->clazz->descriptor, method->name, method); 1542#endif 1543#if WITH_EXTRA_GC_CHECKS > 1 1544 /* 1545 * May also want to enable the memset() in the "invokeMethod" 1546 * goto target in the portable interpreter. That sets the stack 1547 * to a pattern that makes referring to uninitialized data 1548 * very obvious. 1549 */ 1550 1551 if (first) { 1552 /* 1553 * First frame, isn't native, check the "alternate" saved PC 1554 * as a sanity check. 1555 * 1556 * It seems like we could check the second frame if the first 1557 * is native, since the PCs should be the same. It turns out 1558 * this doesn't always work. The problem is that we could 1559 * have calls in the sequence: 1560 * interp method #2 1561 * native method 1562 * interp method #1 1563 * 1564 * and then GC while in the native method after returning 1565 * from interp method #2. The currentPc on the stack is 1566 * for interp method #1, but thread->currentPc2 is still 1567 * set for the last thing interp method #2 did. 1568 * 1569 * This can also happen in normal execution: 1570 * - sget-object on not-yet-loaded class 1571 * - class init updates currentPc2 1572 * - static field init is handled by parsing annotations; 1573 * static String init requires creation of a String object, 1574 * which can cause a GC 1575 * 1576 * Essentially, any pattern that involves executing 1577 * interpreted code and then causes an allocation without 1578 * executing instructions in the original method will hit 1579 * this. These are rare enough that the test still has 1580 * some value. 1581 */ 1582 if (saveArea->xtra.currentPc != thread->currentPc2) { 1583 LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p", 1584 saveArea->xtra.currentPc, thread->currentPc2, 1585 method->clazz->descriptor, method->name, method->insns); 1586 if (saveArea->xtra.currentPc != NULL) 1587 LOGE(" pc inst = 0x%04x", *saveArea->xtra.currentPc); 1588 if (thread->currentPc2 != NULL) 1589 LOGE(" pc2 inst = 0x%04x", *thread->currentPc2); 1590 dvmDumpThread(thread, false); 1591 } 1592 } else { 1593 /* 1594 * It's unusual, but not impossible, for a non-first frame 1595 * to be at something other than a method invocation. For 1596 * example, if we do a new-instance on a nonexistent class, 1597 * we'll have a lot of class loader activity on the stack 1598 * above the frame with the "new" operation. Could also 1599 * happen while we initialize a Throwable when an instruction 1600 * fails. 1601 * 1602 * So there's not much we can do here to verify the PC, 1603 * except to verify that it's a GC point. 1604 */ 1605 } 1606 assert(saveArea->xtra.currentPc != NULL); 1607#endif 1608 1609 const RegisterMap* pMap; 1610 const u1* regVector; 1611 1612 Method* nonConstMethod = (Method*) method; // quiet gcc 1613 pMap = dvmGetExpandedRegisterMap(nonConstMethod); 1614 1615 //LOG_SCAV("PGC: %s.%s", method->clazz->descriptor, method->name); 1616 1617 if (pMap != NULL) { 1618 /* found map, get registers for this address */ 1619 int addr = saveArea->xtra.currentPc - method->insns; 1620 regVector = dvmRegisterMapGetLine(pMap, addr); 1621 /* 1622 if (regVector == NULL) { 1623 LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x", 1624 method->clazz->descriptor, method->name, addr); 1625 } else { 1626 LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)", 1627 method->clazz->descriptor, method->name, addr, 1628 thread->threadId); 1629 } 1630 */ 1631 } else { 1632 /* 1633 * No map found. If precise GC is disabled this is 1634 * expected -- we don't create pointers to the map data even 1635 * if it's present -- but if it's enabled it means we're 1636 * unexpectedly falling back on a conservative scan, so it's 1637 * worth yelling a little. 1638 */ 1639 if (gDvm.preciseGc) { 1640 LOG_SCAV("PGC: no map for %s.%s", method->clazz->descriptor, method->name); 1641 } 1642 regVector = NULL; 1643 } 1644 if (regVector == NULL) { 1645 /* 1646 * There are no roots to scavenge. Skip over the entire frame. 1647 */ 1648 framePtr += method->registersSize; 1649 } else { 1650 /* 1651 * Precise scan. v0 is at the lowest address on the 1652 * interpreted stack, and is the first bit in the register 1653 * vector, so we can walk through the register map and 1654 * memory in the same direction. 1655 * 1656 * A '1' bit indicates a live reference. 1657 */ 1658 u2 bits = 1 << 1; 1659 for (int i = method->registersSize - 1; i >= 0; i--) { 1660 u4 rval = *framePtr; 1661 1662 bits >>= 1; 1663 if (bits == 1) { 1664 /* set bit 9 so we can tell when we're empty */ 1665 bits = *regVector++ | 0x0100; 1666 } 1667 1668 if (rval != 0 && (bits & 0x01) != 0) { 1669 /* 1670 * Non-null, register marked as live reference. This 1671 * should always be a valid object. 1672 */ 1673#if WITH_EXTRA_GC_CHECKS > 0 1674 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) { 1675 /* this is very bad */ 1676 LOGE("PGC: invalid ref in reg %d: 0x%08x", 1677 method->registersSize-1 - i, rval); 1678 } else 1679#endif 1680 { 1681 1682 // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr); 1683 /* dvmMarkObjectNonNull((Object *)rval); */ 1684 scavengeReference((Object **) framePtr); 1685 } 1686 } else { 1687 /* 1688 * Null or non-reference, do nothing at all. 1689 */ 1690#if WITH_EXTRA_GC_CHECKS > 1 1691 if (dvmIsValidObject((Object*) rval)) { 1692 /* this is normal, but we feel chatty */ 1693 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x", 1694 method->registersSize-1 - i, rval); 1695 } 1696#endif 1697 } 1698 ++framePtr; 1699 } 1700 dvmReleaseRegisterMapLine(pMap, regVector); 1701 } 1702 } 1703 /* else this is a break frame and there is nothing to gray, or 1704 * this is a native method and the registers are just the "ins", 1705 * copied from various registers in the caller's set. 1706 */ 1707 1708#if WITH_EXTRA_GC_CHECKS > 1 1709 first = false; 1710#endif 1711 1712 /* Don't fall into an infinite loop if things get corrupted. 1713 */ 1714 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr || 1715 saveArea->prevFrame == NULL); 1716 framePtr = saveArea->prevFrame; 1717 } 1718} 1719 1720static void scavengeThread(Thread *thread) 1721{ 1722 // LOG_SCAV("scavengeThread(thread=%p)", thread); 1723 1724 // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj); 1725 scavengeReference(&thread->threadObj); 1726 1727 // LOG_SCAV("Scavenging exception=%p", thread->exception); 1728 scavengeReference(&thread->exception); 1729 1730 scavengeThreadStack(thread); 1731} 1732 1733static void scavengeThreadList() 1734{ 1735 Thread *thread; 1736 1737 dvmLockThreadList(dvmThreadSelf()); 1738 thread = gDvm.threadList; 1739 while (thread) { 1740 scavengeThread(thread); 1741 thread = thread->next; 1742 } 1743 dvmUnlockThreadList(); 1744} 1745 1746static void pinThreadStack(const Thread *thread) 1747{ 1748 const u4 *framePtr; 1749 const StackSaveArea *saveArea; 1750 Method *method; 1751 const char *shorty; 1752 Object *obj; 1753 1754 saveArea = NULL; 1755 framePtr = (const u4 *)thread->interpSave.curFrame; 1756 for (; framePtr != NULL; framePtr = saveArea->prevFrame) { 1757 saveArea = SAVEAREA_FROM_FP(framePtr); 1758 method = (Method *)saveArea->method; 1759 if (method != NULL && dvmIsNativeMethod(method)) { 1760 /* 1761 * This is native method, pin its arguments. 1762 * 1763 * For purposes of graying references, we don't need to do 1764 * anything here, because all of the native "ins" were copied 1765 * from registers in the caller's stack frame and won't be 1766 * changed (an interpreted method can freely use registers 1767 * with parameters like any other register, but natives don't 1768 * work that way). 1769 * 1770 * However, we need to ensure that references visible to 1771 * native methods don't move around. We can do a precise scan 1772 * of the arguments by examining the method signature. 1773 */ 1774 LOG_PIN("+++ native scan %s.%s", 1775 method->clazz->descriptor, method->name); 1776 assert(method->registersSize == method->insSize); 1777 if (!dvmIsStaticMethod(method)) { 1778 /* grab the "this" pointer */ 1779 obj = (Object *)*framePtr++; 1780 if (obj == NULL) { 1781 /* 1782 * This can happen for the "fake" entry frame inserted 1783 * for threads created outside the VM. There's no actual 1784 * call so there's no object. If we changed the fake 1785 * entry method to be declared "static" then this 1786 * situation should never occur. 1787 */ 1788 } else { 1789 assert(dvmIsValidObject(obj)); 1790 pinObject(obj); 1791 } 1792 } 1793 shorty = method->shorty+1; // skip return value 1794 for (int i = method->registersSize - 1; i >= 0; i--, framePtr++) { 1795 switch (*shorty++) { 1796 case 'L': 1797 obj = (Object *)*framePtr; 1798 if (obj != NULL) { 1799 assert(dvmIsValidObject(obj)); 1800 pinObject(obj); 1801 } 1802 break; 1803 case 'D': 1804 case 'J': 1805 framePtr++; 1806 break; 1807 default: 1808 /* 32-bit non-reference value */ 1809 obj = (Object *)*framePtr; // debug, remove 1810 if (dvmIsValidObject(obj)) { // debug, remove 1811 /* if we see a lot of these, our scan might be off */ 1812 LOG_PIN("+++ did NOT pin obj %p", obj); 1813 } 1814 break; 1815 } 1816 } 1817 } else if (method != NULL && !dvmIsNativeMethod(method)) { 1818 const RegisterMap* pMap = dvmGetExpandedRegisterMap(method); 1819 const u1* regVector = NULL; 1820 1821 LOGI("conservative : %s.%s", method->clazz->descriptor, method->name); 1822 1823 if (pMap != NULL) { 1824 int addr = saveArea->xtra.currentPc - method->insns; 1825 regVector = dvmRegisterMapGetLine(pMap, addr); 1826 } 1827 if (regVector == NULL) { 1828 /* 1829 * No register info for this frame, conservatively pin. 1830 */ 1831 for (int i = 0; i < method->registersSize; ++i) { 1832 u4 regValue = framePtr[i]; 1833 if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) { 1834 pinObject((Object *)regValue); 1835 } 1836 } 1837 } 1838 } 1839 /* 1840 * Don't fall into an infinite loop if things get corrupted. 1841 */ 1842 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr || 1843 saveArea->prevFrame == NULL); 1844 } 1845} 1846 1847static void pinThread(const Thread *thread) 1848{ 1849 assert(thread != NULL); 1850 LOG_PIN("pinThread(thread=%p)", thread); 1851 1852 LOG_PIN("Pin native method arguments"); 1853 pinThreadStack(thread); 1854 1855 LOG_PIN("Pin internalLocalRefTable"); 1856 pinReferenceTable(&thread->internalLocalRefTable); 1857 1858 LOG_PIN("Pin jniLocalRefTable"); 1859 pinReferenceTable(&thread->jniLocalRefTable); 1860 1861 /* Can the check be pushed into the promote routine? */ 1862 if (thread->jniMonitorRefTable.table) { 1863 LOG_PIN("Pin jniMonitorRefTable"); 1864 pinReferenceTable(&thread->jniMonitorRefTable); 1865 } 1866} 1867 1868static void pinThreadList() 1869{ 1870 Thread *thread; 1871 1872 dvmLockThreadList(dvmThreadSelf()); 1873 thread = gDvm.threadList; 1874 while (thread) { 1875 pinThread(thread); 1876 thread = thread->next; 1877 } 1878 dvmUnlockThreadList(); 1879} 1880 1881/* 1882 * Heap block scavenging. 1883 */ 1884 1885/* 1886 * Scavenge objects in the current block. Scavenging terminates when 1887 * the pointer reaches the highest address in the block or when a run 1888 * of zero words that continues to the highest address is reached. 1889 */ 1890static void scavengeBlock(HeapSource *heapSource, size_t block) 1891{ 1892 u1 *cursor; 1893 u1 *end; 1894 size_t size; 1895 1896 LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block); 1897 1898 assert(heapSource != NULL); 1899 assert(block < heapSource->totalBlocks); 1900 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE); 1901 1902 cursor = blockToAddress(heapSource, block); 1903 end = cursor + BLOCK_SIZE; 1904 LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end); 1905 1906 /* Parse and scavenge the current block. */ 1907 size = 0; 1908 while (cursor < end) { 1909 u4 word = *(u4 *)cursor; 1910 if (word != 0) { 1911 scavengeObject((Object *)cursor); 1912 size = objectSize((Object *)cursor); 1913 size = alignUp(size, ALLOC_ALIGNMENT); 1914 cursor += size; 1915 } else { 1916 /* Check for padding. */ 1917 while (*(u4 *)cursor == 0) { 1918 cursor += 4; 1919 if (cursor == end) break; 1920 } 1921 /* Punt if something went wrong. */ 1922 assert(cursor == end); 1923 } 1924 } 1925} 1926 1927static size_t objectSize(const Object *obj) 1928{ 1929 size_t size; 1930 1931 assert(obj != NULL); 1932 assert(obj->clazz != NULL); 1933 if (obj->clazz == gDvm.classJavaLangClass) { 1934 size = dvmClassObjectSize((ClassObject *)obj); 1935 } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) { 1936 size = dvmArrayObjectSize((ArrayObject *)obj); 1937 } else { 1938 assert(obj->clazz->objectSize != 0); 1939 size = obj->clazz->objectSize; 1940 } 1941 if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) { 1942 size += sizeof(u4); 1943 } 1944 return size; 1945} 1946 1947static void verifyBlock(HeapSource *heapSource, size_t block) 1948{ 1949 u1 *cursor; 1950 u1 *end; 1951 size_t size; 1952 1953 // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block); 1954 1955 assert(heapSource != NULL); 1956 assert(block < heapSource->totalBlocks); 1957 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE); 1958 1959 cursor = blockToAddress(heapSource, block); 1960 end = cursor + BLOCK_SIZE; 1961 // LOG_VER("verifyBlock start=%p, end=%p", cursor, end); 1962 1963 /* Parse and scavenge the current block. */ 1964 size = 0; 1965 while (cursor < end) { 1966 u4 word = *(u4 *)cursor; 1967 if (word != 0) { 1968 dvmVerifyObject((Object *)cursor); 1969 size = objectSize((Object *)cursor); 1970 size = alignUp(size, ALLOC_ALIGNMENT); 1971 cursor += size; 1972 } else { 1973 /* Check for padding. */ 1974 while (*(unsigned long *)cursor == 0) { 1975 cursor += 4; 1976 if (cursor == end) break; 1977 } 1978 /* Punt if something went wrong. */ 1979 assert(cursor == end); 1980 } 1981 } 1982} 1983 1984static void describeBlockQueue(const HeapSource *heapSource) 1985{ 1986 size_t block, count; 1987 char space; 1988 1989 block = heapSource->queueHead; 1990 count = 0; 1991 LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource); 1992 /* Count the number of blocks enqueued. */ 1993 while (block != QUEUE_TAIL) { 1994 block = heapSource->blockQueue[block]; 1995 ++count; 1996 } 1997 LOG_SCAV("blockQueue %zu elements, enqueued %zu", 1998 count, heapSource->queueSize); 1999 block = heapSource->queueHead; 2000 while (block != QUEUE_TAIL) { 2001 space = heapSource->blockSpace[block]; 2002 LOG_SCAV("block=%zu@%p,space=%zu", block, blockToAddress(heapSource,block), space); 2003 block = heapSource->blockQueue[block]; 2004 } 2005 2006 LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource); 2007} 2008 2009/* 2010 * Blackens promoted objects. 2011 */ 2012static void scavengeBlockQueue() 2013{ 2014 HeapSource *heapSource; 2015 size_t block; 2016 2017 LOG_SCAV(">>> scavengeBlockQueue()"); 2018 heapSource = gDvm.gcHeap->heapSource; 2019 describeBlockQueue(heapSource); 2020 while (heapSource->queueHead != QUEUE_TAIL) { 2021 block = heapSource->queueHead; 2022 LOG_SCAV("Dequeueing block %zu", block); 2023 scavengeBlock(heapSource, block); 2024 heapSource->queueHead = heapSource->blockQueue[block]; 2025 LOG_SCAV("New queue head is %zu", heapSource->queueHead); 2026 } 2027 LOG_SCAV("<<< scavengeBlockQueue()"); 2028} 2029 2030/* 2031 * Scan the block list and verify all blocks that are marked as being 2032 * in new space. This should be parametrized so we can invoke this 2033 * routine outside of the context of a collection. 2034 */ 2035static void verifyNewSpace() 2036{ 2037 HeapSource *heapSource = gDvm.gcHeap->heapSource; 2038 size_t c0 = 0, c1 = 0, c2 = 0, c7 = 0; 2039 for (size_t i = 0; i < heapSource->totalBlocks; ++i) { 2040 switch (heapSource->blockSpace[i]) { 2041 case BLOCK_FREE: ++c0; break; 2042 case BLOCK_TO_SPACE: ++c1; break; 2043 case BLOCK_FROM_SPACE: ++c2; break; 2044 case BLOCK_CONTINUED: ++c7; break; 2045 default: assert(!"reached"); 2046 } 2047 } 2048 LOG_VER("Block Demographics: " 2049 "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu", 2050 c0, c1, c2, c7); 2051 for (size_t i = 0; i < heapSource->totalBlocks; ++i) { 2052 if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) { 2053 continue; 2054 } 2055 verifyBlock(heapSource, i); 2056 } 2057} 2058 2059void describeHeap() 2060{ 2061 HeapSource *heapSource = gDvm.gcHeap->heapSource; 2062 describeBlocks(heapSource); 2063} 2064 2065/* 2066 * The collection interface. Collection has a few distinct phases. 2067 * The first is flipping AKA condemning AKA whitening the heap. The 2068 * second is to promote all objects which are pointed to by pinned or 2069 * ambiguous references. The third phase is tracing from the stacks, 2070 * registers and various globals. Lastly, a verification of the heap 2071 * is performed. The last phase should be optional. 2072 */ 2073void dvmScavengeRoots() /* Needs a new name badly */ 2074{ 2075 GcHeap *gcHeap; 2076 2077 { 2078 size_t alloc, unused, total; 2079 2080 room(&alloc, &unused, &total); 2081 LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.", 2082 alloc, unused, total); 2083 } 2084 2085 gcHeap = gDvm.gcHeap; 2086 dvmHeapSourceFlip(); 2087 2088 /* 2089 * Promote blocks with stationary objects. 2090 */ 2091 pinThreadList(); 2092 pinReferenceTable(&gDvm.jniGlobalRefTable); 2093 pinReferenceTable(&gDvm.jniPinRefTable); 2094 pinHashTableEntries(gDvm.loadedClasses); 2095 pinHashTableEntries(gDvm.dbgRegistry); 2096 pinPrimitiveClasses(); 2097 pinInternedStrings(); 2098 2099 // describeBlocks(gcHeap->heapSource); 2100 2101 /* 2102 * Create first, open new-space page right here. 2103 */ 2104 2105 /* Reset allocation to an unallocated block. */ 2106 gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1); 2107 gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE; 2108 /* 2109 * Hack: promote the empty block allocated above. If the 2110 * promotions that occurred above did not actually gray any 2111 * objects, the block queue may be empty. We must force a 2112 * promotion to be safe. 2113 */ 2114 promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr); 2115 2116 /* 2117 * Scavenge blocks and relocate movable objects. 2118 */ 2119 2120 LOG_SCAV("Scavenging gDvm.threadList"); 2121 scavengeThreadList(); 2122 2123 LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations"); 2124 scavengeLargeHeapRefTable(gcHeap->referenceOperations); 2125 2126 LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs"); 2127 scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs); 2128 2129 LOG_SCAV("Scavenging random global stuff"); 2130 scavengeReference(&gDvm.outOfMemoryObj); 2131 scavengeReference(&gDvm.internalErrorObj); 2132 scavengeReference(&gDvm.noClassDefFoundErrorObj); 2133 2134 // LOG_SCAV("Scavenging gDvm.internedString"); 2135 scavengeInternedStrings(); 2136 2137 LOG_SCAV("Root scavenge has completed."); 2138 2139 scavengeBlockQueue(); 2140 2141 // LOG_SCAV("Re-snap global class pointers."); 2142 // scavengeGlobals(); 2143 2144 LOG_SCAV("New space scavenge has completed."); 2145 2146 /* 2147 * Process reference objects in strength order. 2148 */ 2149 2150 LOG_REF("Processing soft references..."); 2151 preserveSoftReferences(&gDvm.gcHeap->softReferences); 2152 clearWhiteReferences(&gDvm.gcHeap->softReferences); 2153 2154 LOG_REF("Processing weak references..."); 2155 clearWhiteReferences(&gDvm.gcHeap->weakReferences); 2156 2157 LOG_REF("Finding finalizations..."); 2158 processFinalizableReferences(); 2159 2160 LOG_REF("Processing f-reachable soft references..."); 2161 clearWhiteReferences(&gDvm.gcHeap->softReferences); 2162 2163 LOG_REF("Processing f-reachable weak references..."); 2164 clearWhiteReferences(&gDvm.gcHeap->weakReferences); 2165 2166 LOG_REF("Processing phantom references..."); 2167 clearWhiteReferences(&gDvm.gcHeap->phantomReferences); 2168 2169 /* 2170 * Verify the stack and heap. 2171 */ 2172 dvmVerifyRoots(); 2173 verifyNewSpace(); 2174 2175 //describeBlocks(gcHeap->heapSource); 2176 2177 clearFromSpace(gcHeap->heapSource); 2178 2179 { 2180 size_t alloc, rem, total; 2181 2182 room(&alloc, &rem, &total); 2183 LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total); 2184 } 2185} 2186 2187/* 2188 * Interface compatibility routines. 2189 */ 2190 2191void dvmClearWhiteRefs(Object **list) 2192{ 2193 /* do nothing */ 2194 assert(*list == NULL); 2195} 2196 2197void dvmHandleSoftRefs(Object **list) 2198{ 2199 /* do nothing */ 2200 assert(*list == NULL); 2201} 2202 2203bool dvmHeapBeginMarkStep(GcMode mode) 2204{ 2205 /* do nothing */ 2206 return true; 2207} 2208 2209void dvmHeapFinishMarkStep() 2210{ 2211 /* do nothing */ 2212} 2213 2214void dvmHeapMarkRootSet() 2215{ 2216 /* do nothing */ 2217} 2218 2219void dvmHeapScanMarkedObjects() 2220{ 2221 dvmScavengeRoots(); 2222} 2223 2224void dvmHeapScheduleFinalizations() 2225{ 2226 /* do nothing */ 2227} 2228 2229void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed) 2230{ 2231 *numFreed = 0; 2232 *sizeFreed = 0; 2233 /* do nothing */ 2234} 2235 2236void dvmMarkDirtyObjects() 2237{ 2238 assert(!"implemented"); 2239} 2240 2241void dvmHeapSourceThreadShutdown() 2242{ 2243 /* do nothing */ 2244} 2245