HeapSource.cpp revision 0c13923b0616dc79fcfc41b63119268273bdc8be
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <cutils/mspace.h> 18#include <stdint.h> 19#include <sys/mman.h> 20#include <errno.h> 21 22#define SIZE_MAX UINT_MAX // TODO: get SIZE_MAX from stdint.h 23 24#include "Dalvik.h" 25#include "alloc/Heap.h" 26#include "alloc/HeapInternal.h" 27#include "alloc/HeapSource.h" 28#include "alloc/HeapBitmap.h" 29#include "alloc/HeapBitmapInlines.h" 30 31// TODO: find a real header file for these. 32extern "C" int dlmalloc_trim(size_t); 33extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*); 34 35static void snapIdealFootprint(); 36static void setIdealFootprint(size_t max); 37static size_t getMaximumSize(const HeapSource *hs); 38 39#define HEAP_UTILIZATION_MAX 1024 40#define DEFAULT_HEAP_UTILIZATION 512 // Range 1..HEAP_UTILIZATION_MAX 41#define HEAP_IDEAL_FREE (2 * 1024 * 1024) 42#define HEAP_MIN_FREE (HEAP_IDEAL_FREE / 4) 43 44/* Start a concurrent collection when free memory falls under this 45 * many bytes. 46 */ 47#define CONCURRENT_START (128 << 10) 48 49/* The next GC will not be concurrent when free memory after a GC is 50 * under this many bytes. 51 */ 52#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10)) 53 54#define HS_BOILERPLATE() \ 55 do { \ 56 assert(gDvm.gcHeap != NULL); \ 57 assert(gDvm.gcHeap->heapSource != NULL); \ 58 assert(gHs == gDvm.gcHeap->heapSource); \ 59 } while (0) 60 61struct Heap { 62 /* The mspace to allocate from. 63 */ 64 mspace msp; 65 66 /* The largest size that this heap is allowed to grow to. 67 */ 68 size_t maximumSize; 69 70 /* Number of bytes allocated from this mspace for objects, 71 * including any overhead. This value is NOT exact, and 72 * should only be used as an input for certain heuristics. 73 */ 74 size_t bytesAllocated; 75 76 /* Number of bytes allocated from this mspace at which a 77 * concurrent garbage collection will be started. 78 */ 79 size_t concurrentStartBytes; 80 81 /* Number of objects currently allocated from this mspace. 82 */ 83 size_t objectsAllocated; 84 85 /* 86 * The lowest address of this heap, inclusive. 87 */ 88 char *base; 89 90 /* 91 * The highest address of this heap, exclusive. 92 */ 93 char *limit; 94}; 95 96struct HeapSource { 97 /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX 98 */ 99 size_t targetUtilization; 100 101 /* The starting heap size. 102 */ 103 size_t startSize; 104 105 /* The largest that the heap source as a whole is allowed to grow. 106 */ 107 size_t maximumSize; 108 109 /* 110 * The largest size we permit the heap to grow. This value allows 111 * the user to limit the heap growth below the maximum size. This 112 * is a work around until we can dynamically set the maximum size. 113 * This value can range between the starting size and the maximum 114 * size but should never be set below the current footprint of the 115 * heap. 116 */ 117 size_t growthLimit; 118 119 /* The desired max size of the heap source as a whole. 120 */ 121 size_t idealSize; 122 123 /* The maximum number of bytes allowed to be allocated from the 124 * active heap before a GC is forced. This is used to "shrink" the 125 * heap in lieu of actual compaction. 126 */ 127 size_t softLimit; 128 129 /* The heaps; heaps[0] is always the active heap, 130 * which new objects should be allocated from. 131 */ 132 Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT]; 133 134 /* The current number of heaps. 135 */ 136 size_t numHeaps; 137 138 /* True if zygote mode was active when the HeapSource was created. 139 */ 140 bool sawZygote; 141 142 /* 143 * The base address of the virtual memory reservation. 144 */ 145 char *heapBase; 146 147 /* 148 * The length in bytes of the virtual memory reservation. 149 */ 150 size_t heapLength; 151 152 /* 153 * The live object bitmap. 154 */ 155 HeapBitmap liveBits; 156 157 /* 158 * The mark bitmap. 159 */ 160 HeapBitmap markBits; 161 162 /* 163 * State for the GC daemon. 164 */ 165 bool hasGcThread; 166 pthread_t gcThread; 167 bool gcThreadShutdown; 168 pthread_mutex_t gcThreadMutex; 169 pthread_cond_t gcThreadCond; 170}; 171 172#define hs2heap(hs_) (&((hs_)->heaps[0])) 173 174/* 175 * Returns true iff a soft limit is in effect for the active heap. 176 */ 177static bool isSoftLimited(const HeapSource *hs) 178{ 179 /* softLimit will be either SIZE_MAX or the limit for the 180 * active mspace. idealSize can be greater than softLimit 181 * if there is more than one heap. If there is only one 182 * heap, a non-SIZE_MAX softLimit should always be the same 183 * as idealSize. 184 */ 185 return hs->softLimit <= hs->idealSize; 186} 187 188/* 189 * Returns approximately the maximum number of bytes allowed to be 190 * allocated from the active heap before a GC is forced. 191 */ 192static size_t getAllocLimit(const HeapSource *hs) 193{ 194 if (isSoftLimited(hs)) { 195 return hs->softLimit; 196 } else { 197 return mspace_max_allowed_footprint(hs2heap(hs)->msp); 198 } 199} 200 201/* 202 * Returns the current footprint of all heaps. If includeActive 203 * is false, don't count the heap at index 0. 204 */ 205static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive) 206{ 207 size_t footprint = 0; 208 size_t i; 209 210 if (includeActive) { 211 i = 0; 212 } else { 213 i = 1; 214 } 215 for (/* i = i */; i < hs->numHeaps; i++) { 216//TODO: include size of bitmaps? If so, don't use bitsLen, listen to .max 217 footprint += mspace_footprint(hs->heaps[i].msp); 218 } 219 return footprint; 220} 221 222/* 223 * Returns the heap that <ptr> could have come from, or NULL 224 * if it could not have come from any heap. 225 */ 226static Heap *ptr2heap(const HeapSource *hs, const void *ptr) 227{ 228 const size_t numHeaps = hs->numHeaps; 229 230 if (ptr != NULL) { 231 for (size_t i = 0; i < numHeaps; i++) { 232 const Heap *const heap = &hs->heaps[i]; 233 234 if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) { 235 return (Heap *)heap; 236 } 237 } 238 } 239 return NULL; 240} 241 242/* 243 * Functions to update heapSource->bytesAllocated when an object 244 * is allocated or freed. mspace_usable_size() will give 245 * us a much more accurate picture of heap utilization than 246 * the requested byte sizes would. 247 * 248 * These aren't exact, and should not be treated as such. 249 */ 250static void countAllocation(Heap *heap, const void *ptr) 251{ 252 assert(heap->bytesAllocated < mspace_footprint(heap->msp)); 253 254 heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) + 255 HEAP_SOURCE_CHUNK_OVERHEAD; 256 heap->objectsAllocated++; 257 HeapSource* hs = gDvm.gcHeap->heapSource; 258 dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr); 259 260 assert(heap->bytesAllocated < mspace_footprint(heap->msp)); 261} 262 263static void countFree(Heap *heap, const void *ptr, size_t *numBytes) 264{ 265 size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD; 266 assert(delta > 0); 267 if (delta < heap->bytesAllocated) { 268 heap->bytesAllocated -= delta; 269 } else { 270 heap->bytesAllocated = 0; 271 } 272 HeapSource* hs = gDvm.gcHeap->heapSource; 273 dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr); 274 if (heap->objectsAllocated > 0) { 275 heap->objectsAllocated--; 276 } 277 *numBytes += delta; 278} 279 280static HeapSource *gHs = NULL; 281 282static mspace createMspace(void *base, size_t startSize, size_t maximumSize) 283{ 284 /* Create an unlocked dlmalloc mspace to use as 285 * a heap source. 286 * 287 * We start off reserving startSize / 2 bytes but 288 * letting the heap grow to startSize. This saves 289 * memory in the case where a process uses even less 290 * than the starting size. 291 */ 292 LOGV_HEAP("Creating VM heap of size %zu", startSize); 293 errno = 0; 294 295 mspace msp = create_contiguous_mspace_with_base(startSize/2, 296 maximumSize, /*locked=*/false, base); 297 if (msp != NULL) { 298 /* Don't let the heap grow past the starting size without 299 * our intervention. 300 */ 301 mspace_set_max_allowed_footprint(msp, startSize); 302 } else { 303 /* There's no guarantee that errno has meaning when the call 304 * fails, but it often does. 305 */ 306 LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s", 307 startSize/2, maximumSize, strerror(errno)); 308 } 309 310 return msp; 311} 312 313/* 314 * Add the initial heap. Returns false if the initial heap was 315 * already added to the heap source. 316 */ 317static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize) 318{ 319 assert(hs != NULL); 320 assert(msp != NULL); 321 if (hs->numHeaps != 0) { 322 return false; 323 } 324 hs->heaps[0].msp = msp; 325 hs->heaps[0].maximumSize = maximumSize; 326 hs->heaps[0].concurrentStartBytes = SIZE_MAX; 327 hs->heaps[0].base = hs->heapBase; 328 hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize; 329 hs->numHeaps = 1; 330 return true; 331} 332 333/* 334 * Adds an additional heap to the heap source. Returns false if there 335 * are too many heaps or insufficient free space to add another heap. 336 */ 337static bool addNewHeap(HeapSource *hs) 338{ 339 Heap heap; 340 341 assert(hs != NULL); 342 if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) { 343 LOGE("Attempt to create too many heaps (%zd >= %zd)", 344 hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT); 345 dvmAbort(); 346 return false; 347 } 348 349 memset(&heap, 0, sizeof(heap)); 350 351 /* 352 * Heap storage comes from a common virtual memory reservation. 353 * The new heap will start on the page after the old heap. 354 */ 355 void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp); 356 char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0); 357 size_t overhead = base - hs->heaps[0].base; 358 assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0); 359 360 if (overhead + HEAP_MIN_FREE >= hs->maximumSize) { 361 LOGE_HEAP("No room to create any more heaps " 362 "(%zd overhead, %zd max)", 363 overhead, hs->maximumSize); 364 return false; 365 } 366 367 heap.maximumSize = hs->growthLimit - overhead; 368 heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START; 369 heap.base = base; 370 heap.limit = heap.base + heap.maximumSize; 371 heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead); 372 if (heap.msp == NULL) { 373 return false; 374 } 375 376 /* Don't let the soon-to-be-old heap grow any further. 377 */ 378 hs->heaps[0].maximumSize = overhead; 379 hs->heaps[0].limit = base; 380 mspace msp = hs->heaps[0].msp; 381 mspace_set_max_allowed_footprint(msp, mspace_footprint(msp)); 382 383 /* Put the new heap in the list, at heaps[0]. 384 * Shift existing heaps down. 385 */ 386 memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0])); 387 hs->heaps[0] = heap; 388 hs->numHeaps++; 389 390 return true; 391} 392 393/* 394 * The garbage collection daemon. Initiates a concurrent collection 395 * when signaled. 396 */ 397static void *gcDaemonThread(void* arg) 398{ 399 dvmChangeStatus(NULL, THREAD_VMWAIT); 400 dvmLockMutex(&gHs->gcThreadMutex); 401 while (gHs->gcThreadShutdown != true) { 402 dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex); 403 dvmLockHeap(); 404 dvmChangeStatus(NULL, THREAD_RUNNING); 405 dvmCollectGarbageInternal(GC_CONCURRENT); 406 dvmChangeStatus(NULL, THREAD_VMWAIT); 407 dvmUnlockHeap(); 408 } 409 dvmChangeStatus(NULL, THREAD_RUNNING); 410 return NULL; 411} 412 413static bool gcDaemonStartup() 414{ 415 dvmInitMutex(&gHs->gcThreadMutex); 416 pthread_cond_init(&gHs->gcThreadCond, NULL); 417 gHs->gcThreadShutdown = false; 418 gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC", 419 gcDaemonThread, NULL); 420 return gHs->hasGcThread; 421} 422 423static void gcDaemonShutdown() 424{ 425 if (gHs->hasGcThread) { 426 dvmLockMutex(&gHs->gcThreadMutex); 427 gHs->gcThreadShutdown = true; 428 dvmSignalCond(&gHs->gcThreadCond); 429 dvmUnlockMutex(&gHs->gcThreadMutex); 430 pthread_join(gHs->gcThread, NULL); 431 } 432} 433 434/* 435 * Create a stack big enough for the worst possible case, where the 436 * heap is perfectly full of the smallest object. 437 * TODO: be better about memory usage; use a smaller stack with 438 * overflow detection and recovery. 439 */ 440static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize) 441{ 442 const char *name = "dalvik-mark-stack"; 443 void *addr; 444 445 assert(stack != NULL); 446 stack->length = maximumSize * sizeof(Object*) / 447 (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD); 448 addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name); 449 if (addr == NULL) { 450 return false; 451 } 452 stack->base = (const Object **)addr; 453 stack->limit = (const Object **)((char *)addr + stack->length); 454 stack->top = NULL; 455 madvise(stack->base, stack->length, MADV_DONTNEED); 456 return true; 457} 458 459static void freeMarkStack(GcMarkStack *stack) 460{ 461 assert(stack != NULL); 462 munmap(stack->base, stack->length); 463 memset(stack, 0, sizeof(*stack)); 464} 465 466/* 467 * Initializes the heap source; must be called before any other 468 * dvmHeapSource*() functions. Returns a GcHeap structure 469 * allocated from the heap source. 470 */ 471GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize, 472 size_t growthLimit) 473{ 474 GcHeap *gcHeap; 475 HeapSource *hs; 476 mspace msp; 477 size_t length; 478 void *base; 479 480 assert(gHs == NULL); 481 482 if (!(startSize <= growthLimit && growthLimit <= maximumSize)) { 483 LOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)", 484 startSize, maximumSize, growthLimit); 485 return NULL; 486 } 487 488 /* 489 * Allocate a contiguous region of virtual memory to subdivided 490 * among the heaps managed by the garbage collector. 491 */ 492 length = ALIGN_UP_TO_PAGE_SIZE(maximumSize); 493 base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap"); 494 if (base == NULL) { 495 return NULL; 496 } 497 498 /* Create an unlocked dlmalloc mspace to use as 499 * a heap source. 500 */ 501 msp = createMspace(base, startSize, maximumSize); 502 if (msp == NULL) { 503 goto fail; 504 } 505 506 gcHeap = (GcHeap *)malloc(sizeof(*gcHeap)); 507 if (gcHeap == NULL) { 508 LOGE_HEAP("Can't allocate heap descriptor"); 509 goto fail; 510 } 511 memset(gcHeap, 0, sizeof(*gcHeap)); 512 513 hs = (HeapSource *)malloc(sizeof(*hs)); 514 if (hs == NULL) { 515 LOGE_HEAP("Can't allocate heap source"); 516 free(gcHeap); 517 goto fail; 518 } 519 memset(hs, 0, sizeof(*hs)); 520 521 hs->targetUtilization = DEFAULT_HEAP_UTILIZATION; 522 hs->startSize = startSize; 523 hs->maximumSize = maximumSize; 524 hs->growthLimit = growthLimit; 525 hs->idealSize = startSize; 526 hs->softLimit = SIZE_MAX; // no soft limit at first 527 hs->numHeaps = 0; 528 hs->sawZygote = gDvm.zygote; 529 hs->hasGcThread = false; 530 hs->heapBase = (char *)base; 531 hs->heapLength = length; 532 if (!addInitialHeap(hs, msp, growthLimit)) { 533 LOGE_HEAP("Can't add initial heap"); 534 goto fail; 535 } 536 if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) { 537 LOGE_HEAP("Can't create liveBits"); 538 goto fail; 539 } 540 if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) { 541 LOGE_HEAP("Can't create markBits"); 542 dvmHeapBitmapDelete(&hs->liveBits); 543 goto fail; 544 } 545 if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) { 546 LOGE("Can't create markStack"); 547 dvmHeapBitmapDelete(&hs->markBits); 548 dvmHeapBitmapDelete(&hs->liveBits); 549 goto fail; 550 } 551 gcHeap->markContext.bitmap = &hs->markBits; 552 gcHeap->heapSource = hs; 553 554 gHs = hs; 555 return gcHeap; 556 557fail: 558 munmap(base, length); 559 return NULL; 560} 561 562bool dvmHeapSourceStartupAfterZygote() 563{ 564 return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true; 565} 566 567/* 568 * This is called while in zygote mode, right before we fork() for the 569 * first time. We create a heap for all future zygote process allocations, 570 * in an attempt to avoid touching pages in the zygote heap. (This would 571 * probably be unnecessary if we had a compacting GC -- the source of our 572 * troubles is small allocations filling in the gaps from larger ones.) 573 */ 574bool dvmHeapSourceStartupBeforeFork() 575{ 576 HS_BOILERPLATE(); 577 578 assert(gDvm.zygote); 579 580 if (!gDvm.newZygoteHeapAllocated) { 581 /* Create a new heap for post-fork zygote allocations. We only 582 * try once, even if it fails. 583 */ 584 LOGV("Splitting out new zygote heap"); 585 gDvm.newZygoteHeapAllocated = true; 586 dvmClearCardTable(); 587 return addNewHeap(gHs); 588 } 589 return true; 590} 591 592void dvmHeapSourceThreadShutdown() 593{ 594 if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) { 595 gcDaemonShutdown(); 596 } 597} 598 599/* 600 * Tears down the entire GcHeap structure and all of the substructures 601 * attached to it. This call has the side effect of setting the given 602 * gcHeap pointer and gHs to NULL. 603 */ 604void dvmHeapSourceShutdown(GcHeap **gcHeap) 605{ 606 assert(gcHeap != NULL); 607 if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) { 608 HeapSource *hs = (*gcHeap)->heapSource; 609 dvmHeapBitmapDelete(&hs->liveBits); 610 dvmHeapBitmapDelete(&hs->markBits); 611 freeMarkStack(&(*gcHeap)->markContext.stack); 612 munmap(hs->heapBase, hs->heapLength); 613 free(hs); 614 gHs = NULL; 615 free(*gcHeap); 616 *gcHeap = NULL; 617 } 618} 619 620/* 621 * Gets the begining of the allocation for the HeapSource. 622 */ 623void *dvmHeapSourceGetBase() 624{ 625 return gHs->heapBase; 626} 627 628/* 629 * Returns the requested value. If the per-heap stats are requested, fill 630 * them as well. 631 * 632 * Caller must hold the heap lock. 633 */ 634size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[], 635 size_t arrayLen) 636{ 637 HeapSource *hs = gHs; 638 size_t value = 0; 639 size_t total = 0; 640 641 HS_BOILERPLATE(); 642 643 assert(arrayLen >= hs->numHeaps || perHeapStats == NULL); 644 for (size_t i = 0; i < hs->numHeaps; i++) { 645 Heap *const heap = &hs->heaps[i]; 646 647 switch (spec) { 648 case HS_FOOTPRINT: 649 value = mspace_footprint(heap->msp); 650 break; 651 case HS_ALLOWED_FOOTPRINT: 652 value = mspace_max_allowed_footprint(heap->msp); 653 break; 654 case HS_BYTES_ALLOCATED: 655 value = heap->bytesAllocated; 656 break; 657 case HS_OBJECTS_ALLOCATED: 658 value = heap->objectsAllocated; 659 break; 660 default: 661 // quiet gcc 662 break; 663 } 664 if (perHeapStats) { 665 perHeapStats[i] = value; 666 } 667 total += value; 668 } 669 return total; 670} 671 672void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, uintptr_t *limit, 673 size_t numHeaps) 674{ 675 HeapSource *hs = gHs; 676 677 HS_BOILERPLATE(); 678 679 assert(numHeaps <= hs->numHeaps); 680 for (size_t i = 0; i < numHeaps; ++i) { 681 base[i] = (uintptr_t)hs->heaps[i].base; 682 if (max != NULL) { 683 max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max); 684 } 685 if (limit != NULL) { 686 limit[i] = (uintptr_t)hs->heaps[i].limit; 687 } 688 } 689} 690 691/* 692 * Get the bitmap representing all live objects. 693 */ 694HeapBitmap *dvmHeapSourceGetLiveBits() 695{ 696 HS_BOILERPLATE(); 697 698 return &gHs->liveBits; 699} 700 701/* 702 * Get the bitmap representing all marked objects. 703 */ 704HeapBitmap *dvmHeapSourceGetMarkBits() 705{ 706 HS_BOILERPLATE(); 707 708 return &gHs->markBits; 709} 710 711void dvmHeapSourceSwapBitmaps() 712{ 713 HeapBitmap tmp = gHs->liveBits; 714 gHs->liveBits = gHs->markBits; 715 gHs->markBits = tmp; 716} 717 718void dvmHeapSourceZeroMarkBitmap() 719{ 720 HS_BOILERPLATE(); 721 722 dvmHeapBitmapZero(&gHs->markBits); 723} 724 725void dvmMarkImmuneObjects(const char *immuneLimit) 726{ 727 /* 728 * Copy the contents of the live bit vector for immune object 729 * range into the mark bit vector. 730 */ 731 /* The only values generated by dvmHeapSourceGetImmuneLimit() */ 732 assert(immuneLimit == gHs->heaps[0].base || 733 immuneLimit == NULL); 734 assert(gHs->liveBits.base == gHs->markBits.base); 735 assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen); 736 /* heap[0] is never immune */ 737 assert(gHs->heaps[0].base >= immuneLimit); 738 assert(gHs->heaps[0].limit > immuneLimit); 739 740 for (size_t i = 1; i < gHs->numHeaps; ++i) { 741 if (gHs->heaps[i].base < immuneLimit) { 742 assert(gHs->heaps[i].limit <= immuneLimit); 743 /* Compute the number of words to copy in the bitmap. */ 744 size_t index = HB_OFFSET_TO_INDEX( 745 (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base); 746 /* Compute the starting offset in the live and mark bits. */ 747 char *src = (char *)(gHs->liveBits.bits + index); 748 char *dst = (char *)(gHs->markBits.bits + index); 749 /* Compute the number of bytes of the live bitmap to copy. */ 750 size_t length = HB_OFFSET_TO_BYTE_INDEX( 751 gHs->heaps[i].limit - gHs->heaps[i].base); 752 /* Do the copy. */ 753 memcpy(dst, src, length); 754 /* Make sure max points to the address of the highest set bit. */ 755 if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) { 756 gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit; 757 } 758 } 759 } 760} 761 762/* 763 * Allocates <n> bytes of zeroed data. 764 */ 765void* dvmHeapSourceAlloc(size_t n) 766{ 767 HS_BOILERPLATE(); 768 769 HeapSource *hs = gHs; 770 Heap* heap = hs2heap(hs); 771 if (heap->bytesAllocated + n > hs->softLimit) { 772 /* 773 * This allocation would push us over the soft limit; act as 774 * if the heap is full. 775 */ 776 LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation", 777 FRACTIONAL_MB(hs->softLimit), n); 778 return NULL; 779 } 780 void* ptr = mspace_calloc(heap->msp, 1, n); 781 if (ptr == NULL) { 782 return NULL; 783 } 784 countAllocation(heap, ptr); 785 /* 786 * Check to see if a concurrent GC should be initiated. 787 */ 788 if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) { 789 /* 790 * The garbage collector thread is already running or has yet 791 * to be started. Do nothing. 792 */ 793 return ptr; 794 } 795 if (heap->bytesAllocated > heap->concurrentStartBytes) { 796 /* 797 * We have exceeded the allocation threshold. Wake up the 798 * garbage collector. 799 */ 800 dvmSignalCond(&gHs->gcThreadCond); 801 } 802 return ptr; 803} 804 805/* Remove any hard limits, try to allocate, and shrink back down. 806 * Last resort when trying to allocate an object. 807 */ 808static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n) 809{ 810 /* Grow as much as possible, but don't let the real footprint 811 * go over the absolute max. 812 */ 813 size_t max = heap->maximumSize; 814 815 mspace_set_max_allowed_footprint(heap->msp, max); 816 void* ptr = dvmHeapSourceAlloc(n); 817 818 /* Shrink back down as small as possible. Our caller may 819 * readjust max_allowed to a more appropriate value. 820 */ 821 mspace_set_max_allowed_footprint(heap->msp, 822 mspace_footprint(heap->msp)); 823 return ptr; 824} 825 826/* 827 * Allocates <n> bytes of zeroed data, growing as much as possible 828 * if necessary. 829 */ 830void* dvmHeapSourceAllocAndGrow(size_t n) 831{ 832 HS_BOILERPLATE(); 833 834 HeapSource *hs = gHs; 835 Heap* heap = hs2heap(hs); 836 void* ptr = dvmHeapSourceAlloc(n); 837 if (ptr != NULL) { 838 return ptr; 839 } 840 841 size_t oldIdealSize = hs->idealSize; 842 if (isSoftLimited(hs)) { 843 /* We're soft-limited. Try removing the soft limit to 844 * see if we can allocate without actually growing. 845 */ 846 hs->softLimit = SIZE_MAX; 847 ptr = dvmHeapSourceAlloc(n); 848 if (ptr != NULL) { 849 /* Removing the soft limit worked; fix things up to 850 * reflect the new effective ideal size. 851 */ 852 snapIdealFootprint(); 853 return ptr; 854 } 855 // softLimit intentionally left at SIZE_MAX. 856 } 857 858 /* We're not soft-limited. Grow the heap to satisfy the request. 859 * If this call fails, no footprints will have changed. 860 */ 861 ptr = heapAllocAndGrow(hs, heap, n); 862 if (ptr != NULL) { 863 /* The allocation succeeded. Fix up the ideal size to 864 * reflect any footprint modifications that had to happen. 865 */ 866 snapIdealFootprint(); 867 } else { 868 /* We just couldn't do it. Restore the original ideal size, 869 * fixing up softLimit if necessary. 870 */ 871 setIdealFootprint(oldIdealSize); 872 } 873 return ptr; 874} 875 876/* 877 * Frees the first numPtrs objects in the ptrs list and returns the 878 * amount of reclaimed storage. The list must contain addresses all in 879 * the same mspace, and must be in increasing order. This implies that 880 * there are no duplicates, and no entries are NULL. 881 */ 882size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs) 883{ 884 HS_BOILERPLATE(); 885 886 if (numPtrs == 0) { 887 return 0; 888 } 889 890 assert(ptrs != NULL); 891 assert(*ptrs != NULL); 892 Heap* heap = ptr2heap(gHs, *ptrs); 893 size_t numBytes = 0; 894 if (heap != NULL) { 895 mspace msp = heap->msp; 896 // Calling mspace_free on shared heaps disrupts sharing too 897 // much. For heap[0] -- the 'active heap' -- we call 898 // mspace_free, but on the other heaps we only do some 899 // accounting. 900 if (heap == gHs->heaps) { 901 // mspace_merge_objects takes two allocated objects, and 902 // if the second immediately follows the first, will merge 903 // them, returning a larger object occupying the same 904 // memory. This is a local operation, and doesn't require 905 // dlmalloc to manipulate any freelists. It's pretty 906 // inexpensive compared to free(). 907 908 // ptrs is an array of objects all in memory order, and if 909 // client code has been allocating lots of short-lived 910 // objects, this is likely to contain runs of objects all 911 // now garbage, and thus highly amenable to this optimization. 912 913 // Unroll the 0th iteration around the loop below, 914 // countFree ptrs[0] and initializing merged. 915 assert(ptrs[0] != NULL); 916 assert(ptr2heap(gHs, ptrs[0]) == heap); 917 countFree(heap, ptrs[0], &numBytes); 918 void *merged = ptrs[0]; 919 for (size_t i = 1; i < numPtrs; i++) { 920 assert(merged != NULL); 921 assert(ptrs[i] != NULL); 922 assert((intptr_t)merged < (intptr_t)ptrs[i]); 923 assert(ptr2heap(gHs, ptrs[i]) == heap); 924 countFree(heap, ptrs[i], &numBytes); 925 // Try to merge. If it works, merged now includes the 926 // memory of ptrs[i]. If it doesn't, free merged, and 927 // see if ptrs[i] starts a new run of adjacent 928 // objects to merge. 929 if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) { 930 mspace_free(msp, merged); 931 merged = ptrs[i]; 932 } 933 } 934 assert(merged != NULL); 935 mspace_free(msp, merged); 936 } else { 937 // This is not an 'active heap'. Only do the accounting. 938 for (size_t i = 0; i < numPtrs; i++) { 939 assert(ptrs[i] != NULL); 940 assert(ptr2heap(gHs, ptrs[i]) == heap); 941 countFree(heap, ptrs[i], &numBytes); 942 } 943 } 944 } 945 return numBytes; 946} 947 948/* 949 * Returns true iff <ptr> is in the heap source. 950 */ 951bool dvmHeapSourceContainsAddress(const void *ptr) 952{ 953 HS_BOILERPLATE(); 954 955 return (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr)); 956} 957 958/* 959 * Returns true iff <ptr> was allocated from the heap source. 960 */ 961bool dvmHeapSourceContains(const void *ptr) 962{ 963 HS_BOILERPLATE(); 964 965 if (dvmHeapSourceContainsAddress(ptr)) { 966 return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0; 967 } 968 return false; 969} 970 971bool dvmIsZygoteObject(const Object* obj) 972{ 973 HeapSource *hs = gHs; 974 975 HS_BOILERPLATE(); 976 977 if (dvmHeapSourceContains(obj) && hs->sawZygote) { 978 Heap *heap = ptr2heap(hs, obj); 979 if (heap != NULL) { 980 /* If the object is not in the active heap, we assume that 981 * it was allocated as part of zygote. 982 */ 983 return heap != hs->heaps; 984 } 985 } 986 /* The pointer is outside of any known heap, or we are not 987 * running in zygote mode. 988 */ 989 return false; 990} 991 992/* 993 * Returns the number of usable bytes in an allocated chunk; the size 994 * may be larger than the size passed to dvmHeapSourceAlloc(). 995 */ 996size_t dvmHeapSourceChunkSize(const void *ptr) 997{ 998 HS_BOILERPLATE(); 999 1000 Heap* heap = ptr2heap(gHs, ptr); 1001 if (heap != NULL) { 1002 return mspace_usable_size(heap->msp, ptr); 1003 } 1004 return 0; 1005} 1006 1007/* 1008 * Returns the number of bytes that the heap source has allocated 1009 * from the system using sbrk/mmap, etc. 1010 * 1011 * Caller must hold the heap lock. 1012 */ 1013size_t dvmHeapSourceFootprint() 1014{ 1015 HS_BOILERPLATE(); 1016 1017//TODO: include size of bitmaps? 1018 return oldHeapOverhead(gHs, true); 1019} 1020 1021static size_t getMaximumSize(const HeapSource *hs) 1022{ 1023 return hs->growthLimit; 1024} 1025 1026/* 1027 * Returns the current maximum size of the heap source respecting any 1028 * growth limits. 1029 */ 1030size_t dvmHeapSourceGetMaximumSize() 1031{ 1032 HS_BOILERPLATE(); 1033 return getMaximumSize(gHs); 1034} 1035 1036/* 1037 * Removes any growth limits. Allows the user to allocate up to the 1038 * maximum heap size. 1039 */ 1040void dvmClearGrowthLimit() 1041{ 1042 HS_BOILERPLATE(); 1043 dvmLockHeap(); 1044 dvmWaitForConcurrentGcToComplete(); 1045 gHs->growthLimit = gHs->maximumSize; 1046 size_t overhead = oldHeapOverhead(gHs, false); 1047 gHs->heaps[0].maximumSize = gHs->maximumSize - overhead; 1048 gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize; 1049 dvmUnlockHeap(); 1050} 1051 1052/* 1053 * Return the real bytes used by old heaps plus the soft usage of the 1054 * current heap. When a soft limit is in effect, this is effectively 1055 * what it's compared against (though, in practice, it only looks at 1056 * the current heap). 1057 */ 1058static size_t getSoftFootprint(bool includeActive) 1059{ 1060 HS_BOILERPLATE(); 1061 1062 HeapSource *hs = gHs; 1063 size_t ret = oldHeapOverhead(hs, false); 1064 if (includeActive) { 1065 ret += hs->heaps[0].bytesAllocated; 1066 } 1067 1068 return ret; 1069} 1070 1071/* 1072 * Gets the maximum number of bytes that the heap source is allowed 1073 * to allocate from the system. 1074 */ 1075size_t dvmHeapSourceGetIdealFootprint() 1076{ 1077 HeapSource *hs = gHs; 1078 1079 HS_BOILERPLATE(); 1080 1081 return hs->idealSize; 1082} 1083 1084/* 1085 * Sets the soft limit, handling any necessary changes to the allowed 1086 * footprint of the active heap. 1087 */ 1088static void setSoftLimit(HeapSource *hs, size_t softLimit) 1089{ 1090 /* Compare against the actual footprint, rather than the 1091 * max_allowed, because the heap may not have grown all the 1092 * way to the allowed size yet. 1093 */ 1094 mspace msp = hs->heaps[0].msp; 1095 size_t currentHeapSize = mspace_footprint(msp); 1096 if (softLimit < currentHeapSize) { 1097 /* Don't let the heap grow any more, and impose a soft limit. 1098 */ 1099 mspace_set_max_allowed_footprint(msp, currentHeapSize); 1100 hs->softLimit = softLimit; 1101 } else { 1102 /* Let the heap grow to the requested max, and remove any 1103 * soft limit, if set. 1104 */ 1105 mspace_set_max_allowed_footprint(msp, softLimit); 1106 hs->softLimit = SIZE_MAX; 1107 } 1108} 1109 1110/* 1111 * Sets the maximum number of bytes that the heap source is allowed 1112 * to allocate from the system. Clamps to the appropriate maximum 1113 * value. 1114 */ 1115static void setIdealFootprint(size_t max) 1116{ 1117 HS_BOILERPLATE(); 1118 1119 HeapSource *hs = gHs; 1120 size_t maximumSize = getMaximumSize(hs); 1121 if (max > maximumSize) { 1122 LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB", 1123 FRACTIONAL_MB(max), 1124 FRACTIONAL_MB(maximumSize)); 1125 max = maximumSize; 1126 } 1127 1128 /* Convert max into a size that applies to the active heap. 1129 * Old heaps will count against the ideal size. 1130 */ 1131 size_t overhead = getSoftFootprint(false); 1132 size_t activeMax; 1133 if (overhead < max) { 1134 activeMax = max - overhead; 1135 } else { 1136 activeMax = 0; 1137 } 1138 1139 setSoftLimit(hs, activeMax); 1140 hs->idealSize = max; 1141} 1142 1143/* 1144 * Make the ideal footprint equal to the current footprint. 1145 */ 1146static void snapIdealFootprint() 1147{ 1148 HS_BOILERPLATE(); 1149 1150 setIdealFootprint(getSoftFootprint(true)); 1151} 1152 1153/* 1154 * Gets the current ideal heap utilization, represented as a number 1155 * between zero and one. 1156 */ 1157float dvmGetTargetHeapUtilization() 1158{ 1159 HeapSource *hs = gHs; 1160 1161 HS_BOILERPLATE(); 1162 1163 return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX; 1164} 1165 1166/* 1167 * Sets the new ideal heap utilization, represented as a number 1168 * between zero and one. 1169 */ 1170void dvmSetTargetHeapUtilization(float newTarget) 1171{ 1172 HeapSource *hs = gHs; 1173 1174 HS_BOILERPLATE(); 1175 1176 /* Clamp it to a reasonable range. 1177 */ 1178 // TODO: This may need some tuning. 1179 if (newTarget < 0.2) { 1180 newTarget = 0.2; 1181 } else if (newTarget > 0.8) { 1182 newTarget = 0.8; 1183 } 1184 1185 hs->targetUtilization = 1186 (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX); 1187 LOGV("Set heap target utilization to %zd/%d (%f)", 1188 hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget); 1189} 1190 1191/* 1192 * Given the size of a live set, returns the ideal heap size given 1193 * the current target utilization and MIN/MAX values. 1194 * 1195 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX. 1196 */ 1197static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization) 1198{ 1199 /* Use the current target utilization ratio to determine the 1200 * ideal heap size based on the size of the live set. 1201 */ 1202 size_t targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX; 1203 1204 /* Cap the amount of free space, though, so we don't end up 1205 * with, e.g., 8MB of free space when the live set size hits 8MB. 1206 */ 1207 if (targetSize > liveSize + HEAP_IDEAL_FREE) { 1208 targetSize = liveSize + HEAP_IDEAL_FREE; 1209 } else if (targetSize < liveSize + HEAP_MIN_FREE) { 1210 targetSize = liveSize + HEAP_MIN_FREE; 1211 } 1212 return targetSize; 1213} 1214 1215/* 1216 * Given the current contents of the active heap, increase the allowed 1217 * heap footprint to match the target utilization ratio. This 1218 * should only be called immediately after a full mark/sweep. 1219 */ 1220void dvmHeapSourceGrowForUtilization() 1221{ 1222 HS_BOILERPLATE(); 1223 1224 HeapSource *hs = gHs; 1225 Heap* heap = hs2heap(hs); 1226 1227 /* Use the current target utilization ratio to determine the 1228 * ideal heap size based on the size of the live set. 1229 * Note that only the active heap plays any part in this. 1230 * 1231 * Avoid letting the old heaps influence the target free size, 1232 * because they may be full of objects that aren't actually 1233 * in the working set. Just look at the allocated size of 1234 * the current heap. 1235 */ 1236 size_t currentHeapUsed = heap->bytesAllocated; 1237 size_t targetHeapSize = 1238 getUtilizationTarget(currentHeapUsed, hs->targetUtilization); 1239 1240 /* The ideal size includes the old heaps; add overhead so that 1241 * it can be immediately subtracted again in setIdealFootprint(). 1242 * If the target heap size would exceed the max, setIdealFootprint() 1243 * will clamp it to a legal value. 1244 */ 1245 size_t overhead = getSoftFootprint(false); 1246 setIdealFootprint(targetHeapSize + overhead); 1247 1248 size_t freeBytes = getAllocLimit(hs); 1249 if (freeBytes < CONCURRENT_MIN_FREE) { 1250 /* Not enough free memory to allow a concurrent GC. */ 1251 heap->concurrentStartBytes = SIZE_MAX; 1252 } else { 1253 heap->concurrentStartBytes = freeBytes - CONCURRENT_START; 1254 } 1255} 1256 1257/* 1258 * Return free pages to the system. 1259 * TODO: move this somewhere else, especially the native heap part. 1260 */ 1261static void releasePagesInRange(void *start, void *end, void *nbytes) 1262{ 1263 /* Linux requires that the madvise() start address is page-aligned. 1264 * We also align the end address. 1265 */ 1266 start = (void *)ALIGN_UP_TO_PAGE_SIZE(start); 1267 end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1)); 1268 if (start < end) { 1269 size_t length = (char *)end - (char *)start; 1270 madvise(start, length, MADV_DONTNEED); 1271 *(size_t *)nbytes += length; 1272 } 1273} 1274 1275/* 1276 * Return unused memory to the system if possible. 1277 */ 1278void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen) 1279{ 1280 HS_BOILERPLATE(); 1281 1282 assert(arrayLen >= gHs->numHeaps); 1283 HeapSource *hs = gHs; 1284 size_t heapBytes = 0; 1285 for (size_t i = 0; i < hs->numHeaps; i++) { 1286 Heap *heap = &hs->heaps[i]; 1287 1288 /* Return the wilderness chunk to the system. 1289 */ 1290 mspace_trim(heap->msp, 0); 1291 1292 /* Return any whole free pages to the system. 1293 */ 1294 bytesTrimmed[i] = 0; 1295 mspace_walk_free_pages(heap->msp, releasePagesInRange, 1296 &bytesTrimmed[i]); 1297 heapBytes += bytesTrimmed[i]; 1298 } 1299 1300 /* Same for the native heap. 1301 */ 1302 dlmalloc_trim(0); 1303 size_t nativeBytes = 0; 1304 dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes); 1305 1306 LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes", 1307 heapBytes, nativeBytes, heapBytes + nativeBytes); 1308} 1309 1310/* 1311 * Walks over the heap source and passes every allocated and 1312 * free chunk to the callback. 1313 */ 1314void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen, 1315 const void *userptr, size_t userlen, 1316 void *arg), 1317 void *arg) 1318{ 1319 HS_BOILERPLATE(); 1320 1321 /* Walk the heaps from oldest to newest. 1322 */ 1323//TODO: do this in address order 1324 HeapSource *hs = gHs; 1325 for (size_t i = hs->numHeaps; i > 0; --i) { 1326 mspace_walk_heap(hs->heaps[i-1].msp, callback, arg); 1327 } 1328} 1329 1330/* 1331 * Gets the number of heaps available in the heap source. 1332 * 1333 * Caller must hold the heap lock, because gHs caches a field 1334 * in gDvm.gcHeap. 1335 */ 1336size_t dvmHeapSourceGetNumHeaps() 1337{ 1338 HS_BOILERPLATE(); 1339 1340 return gHs->numHeaps; 1341} 1342 1343void *dvmHeapSourceGetImmuneLimit(bool isPartial) 1344{ 1345 if (isPartial) { 1346 return hs2heap(gHs)->base; 1347 } else { 1348 return NULL; 1349 } 1350} 1351