1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <cutils/mspace.h> 18#include <limits.h> // for INT_MAX 19#include <sys/mman.h> 20#include <errno.h> 21 22#include "Dalvik.h" 23#include "alloc/Heap.h" 24#include "alloc/HeapInternal.h" 25#include "alloc/HeapSource.h" 26#include "alloc/HeapBitmap.h" 27 28// TODO: find a real header file for these. 29extern int dlmalloc_trim(size_t); 30extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*); 31 32static void snapIdealFootprint(void); 33static void setIdealFootprint(size_t max); 34 35#ifndef PAGE_SIZE 36#define PAGE_SIZE 4096 37#endif 38#define ALIGN_UP_TO_PAGE_SIZE(p) \ 39 (((size_t)(p) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1)) 40#define ALIGN_DOWN_TO_PAGE_SIZE(p) \ 41 ((size_t)(p) & ~(PAGE_SIZE - 1)) 42 43#define HEAP_UTILIZATION_MAX 1024 44#define DEFAULT_HEAP_UTILIZATION 512 // Range 1..HEAP_UTILIZATION_MAX 45#define HEAP_IDEAL_FREE (2 * 1024 * 1024) 46#define HEAP_MIN_FREE (HEAP_IDEAL_FREE / 4) 47 48#define HS_BOILERPLATE() \ 49 do { \ 50 assert(gDvm.gcHeap != NULL); \ 51 assert(gDvm.gcHeap->heapSource != NULL); \ 52 assert(gHs == gDvm.gcHeap->heapSource); \ 53 } while (0) 54 55#define DEBUG_HEAP_SOURCE 0 56#if DEBUG_HEAP_SOURCE 57#define HSTRACE(...) LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__) 58#else 59#define HSTRACE(...) 
/**/
#endif

/*
=======================================================
=======================================================
=======================================================

How will this be used?
allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
    - if allocating in large doesn't work, try allocating from small
Heap.c will use HeapSource.h; HeapSource.c will do the right thing
    between small and large
    - some operations should be abstracted; put in a structure

How do we manage the size trade-offs?
- keep mspace max footprint clamped to actual footprint
- if small-alloc returns null, adjust large vs. small ratio
    - give small all available slack and retry
    - success or fail, snap back to actual footprint and give rest to large

managed as "small actual" + "large actual" + "delta to allowed total footprint"
- when allocating from one source or the other, give the delta to the
  active source, but snap back afterwards
- that may not work so great for a gc heap, because small will always consume.
    - but we need to use the memory, and the current max is the amount we
      need to fill before a GC.

Find a way to permanently steal pages from the middle of the heap
    - segment tricks?

Allocate String and char[] in a separate heap?

Maybe avoid growing small heap, even if there's slack?  Look at
live ratio of small heap after a gc; scale it based on that.

=======================================================
=======================================================
=======================================================
*/

/* One individual heap: a dlmalloc mspace plus the bitmap that tracks
 * the objects allocated from it, and some allocation statistics.
 */
typedef struct {
    /* The mspace to allocate from.
     */
    mspace *msp;

    /* The bitmap that keeps track of where objects are in the heap.
     */
    HeapBitmap objectBitmap;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t absoluteMaxSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;
} Heap;

/* The collection of heaps managed by this file.  heaps[0] is always
 * the active heap; older heaps (zygote-era heaps) follow it and are
 * effectively frozen.
 */
struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* Requested minimum heap size, or zero if there is no minimum.
     */
    size_t minimumSize;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t absoluteMaxSize;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* External allocation count.
     */
    size_t externalBytesAllocated;

    /* The maximum number of external bytes that may be allocated.
     */
    size_t externalLimit;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;
};

/* The active heap is always heaps[0]. */
#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static inline bool
softLimited(const HeapSource *hs)
{
    /* softLimit will be either INT_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-INT_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static inline size_t
oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 *
 * NOTE: this is only a range check against each heap's bitmap
 * coverage ("MayContain"); it does not prove that <ptr> refers
 * to a live object.  Callers that need certainty must also check
 * the object bit (see dvmHeapSourceContains()).
 */
static inline Heap *
ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;
    size_t i;

//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
    if (ptr != NULL) {
        for (i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if (dvmHeapBitmapMayContainObject(&heap->objectBitmap, ptr)) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
242 */ 243static inline void 244countAllocation(Heap *heap, const void *ptr, bool isObj) 245{ 246 assert(heap->bytesAllocated < mspace_footprint(heap->msp)); 247 248 heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) + 249 HEAP_SOURCE_CHUNK_OVERHEAD; 250 if (isObj) { 251 heap->objectsAllocated++; 252 dvmHeapBitmapSetObjectBit(&heap->objectBitmap, ptr); 253 } 254 255 assert(heap->bytesAllocated < mspace_footprint(heap->msp)); 256} 257 258static inline void 259countFree(Heap *heap, const void *ptr, bool isObj) 260{ 261 size_t delta; 262 263 delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD; 264 assert(delta > 0); 265 if (delta < heap->bytesAllocated) { 266 heap->bytesAllocated -= delta; 267 } else { 268 heap->bytesAllocated = 0; 269 } 270 if (isObj) { 271 dvmHeapBitmapClearObjectBit(&heap->objectBitmap, ptr); 272 if (heap->objectsAllocated > 0) { 273 heap->objectsAllocated--; 274 } 275 } 276} 277 278static HeapSource *gHs = NULL; 279 280static mspace * 281createMspace(size_t startSize, size_t absoluteMaxSize, size_t id) 282{ 283 mspace *msp; 284 char name[PATH_MAX]; 285 286 /* If two ashmem regions have the same name, only one gets 287 * the name when looking at the maps. 288 */ 289 snprintf(name, sizeof(name)-1, "dalvik-heap%s/%zd", 290 gDvm.zygote ? "/zygote" : "", id); 291 name[sizeof(name)-1] = '\0'; 292 293 /* Create an unlocked dlmalloc mspace to use as 294 * a small-object heap source. 295 * 296 * We start off reserving heapSizeStart/2 bytes but 297 * letting the heap grow to heapSizeStart. This saves 298 * memory in the case where a process uses even less 299 * than the starting size. 300 */ 301 LOGV_HEAP("Creating VM heap of size %u\n", startSize); 302 errno = 0; 303 msp = create_contiguous_mspace_with_name(startSize/2, 304 absoluteMaxSize, /*locked=*/false, name); 305 if (msp != NULL) { 306 /* Don't let the heap grow past the starting size without 307 * our intervention. 
308 */ 309 mspace_set_max_allowed_footprint(msp, startSize); 310 } else { 311 /* There's no guarantee that errno has meaning when the call 312 * fails, but it often does. 313 */ 314 LOGE_HEAP("Can't create VM heap of size (%u,%u) (errno=%d)\n", 315 startSize/2, absoluteMaxSize, errno); 316 } 317 318 return msp; 319} 320 321static bool 322addNewHeap(HeapSource *hs, mspace *msp, size_t mspAbsoluteMaxSize) 323{ 324 Heap heap; 325 326 if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) { 327 LOGE("Attempt to create too many heaps (%zd >= %zd)\n", 328 hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT); 329 dvmAbort(); 330 return false; 331 } 332 333 memset(&heap, 0, sizeof(heap)); 334 335 if (msp != NULL) { 336 heap.msp = msp; 337 heap.absoluteMaxSize = mspAbsoluteMaxSize; 338 } else { 339 size_t overhead; 340 341 overhead = oldHeapOverhead(hs, true); 342 if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) { 343 LOGE_HEAP("No room to create any more heaps " 344 "(%zd overhead, %zd max)\n", 345 overhead, hs->absoluteMaxSize); 346 return false; 347 } 348 heap.absoluteMaxSize = hs->absoluteMaxSize - overhead; 349 heap.msp = createMspace(HEAP_MIN_FREE, heap.absoluteMaxSize, 350 hs->numHeaps); 351 if (heap.msp == NULL) { 352 return false; 353 } 354 } 355 if (!dvmHeapBitmapInit(&heap.objectBitmap, 356 (void *)ALIGN_DOWN_TO_PAGE_SIZE(heap.msp), 357 heap.absoluteMaxSize, 358 "objects")) 359 { 360 LOGE_HEAP("Can't create objectBitmap\n"); 361 goto fail; 362 } 363 364 /* Don't let the soon-to-be-old heap grow any further. 365 */ 366 if (hs->numHeaps > 0) { 367 mspace *msp = hs->heaps[0].msp; 368 mspace_set_max_allowed_footprint(msp, mspace_footprint(msp)); 369 } 370 371 /* Put the new heap in the list, at heaps[0]. 372 * Shift existing heaps down. 
373 */ 374 memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0])); 375 hs->heaps[0] = heap; 376 hs->numHeaps++; 377 378 return true; 379 380fail: 381 if (msp == NULL) { 382 destroy_contiguous_mspace(heap.msp); 383 } 384 return false; 385} 386 387/* 388 * Initializes the heap source; must be called before any other 389 * dvmHeapSource*() functions. Returns a GcHeap structure 390 * allocated from the heap source. 391 */ 392GcHeap * 393dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize) 394{ 395 GcHeap *gcHeap; 396 HeapSource *hs; 397 Heap *heap; 398 mspace msp; 399 400 assert(gHs == NULL); 401 402 if (startSize > absoluteMaxSize) { 403 LOGE("Bad heap parameters (start=%d, max=%d)\n", 404 startSize, absoluteMaxSize); 405 return NULL; 406 } 407 408 /* Create an unlocked dlmalloc mspace to use as 409 * the small object heap source. 410 */ 411 msp = createMspace(startSize, absoluteMaxSize, 0); 412 if (msp == NULL) { 413 return false; 414 } 415 416 /* Allocate a descriptor from the heap we just created. 
417 */ 418 gcHeap = mspace_malloc(msp, sizeof(*gcHeap)); 419 if (gcHeap == NULL) { 420 LOGE_HEAP("Can't allocate heap descriptor\n"); 421 goto fail; 422 } 423 memset(gcHeap, 0, sizeof(*gcHeap)); 424 425 hs = mspace_malloc(msp, sizeof(*hs)); 426 if (hs == NULL) { 427 LOGE_HEAP("Can't allocate heap source\n"); 428 goto fail; 429 } 430 memset(hs, 0, sizeof(*hs)); 431 432 hs->targetUtilization = DEFAULT_HEAP_UTILIZATION; 433 hs->minimumSize = 0; 434 hs->startSize = startSize; 435 hs->absoluteMaxSize = absoluteMaxSize; 436 hs->idealSize = startSize; 437 hs->softLimit = INT_MAX; // no soft limit at first 438 hs->numHeaps = 0; 439 hs->sawZygote = gDvm.zygote; 440 if (!addNewHeap(hs, msp, absoluteMaxSize)) { 441 LOGE_HEAP("Can't add initial heap\n"); 442 goto fail; 443 } 444 445 gcHeap->heapSource = hs; 446 447 countAllocation(hs2heap(hs), gcHeap, false); 448 countAllocation(hs2heap(hs), hs, false); 449 450 gHs = hs; 451 return gcHeap; 452 453fail: 454 destroy_contiguous_mspace(msp); 455 return NULL; 456} 457 458/* 459 * If the HeapSource was created while in zygote mode, this 460 * will create a new heap for post-zygote allocations. 461 * Having a separate heap should maximize the number of pages 462 * that a given app_process shares with the zygote process. 463 */ 464bool 465dvmHeapSourceStartupAfterZygote() 466{ 467 HeapSource *hs = gHs; // use a local to avoid the implicit "volatile" 468 469 HS_BOILERPLATE(); 470 471 assert(!gDvm.zygote); 472 473 if (hs->sawZygote) { 474 /* Create a new heap for post-zygote allocations. 475 */ 476 return addNewHeap(hs, NULL, 0); 477 } 478 return true; 479} 480 481/* 482 * This is called while in zygote mode, right before we fork() for the 483 * first time. We create a heap for all future zygote process allocations, 484 * in an attempt to avoid touching pages in the zygote heap. 
(This would 485 * probably be unnecessary if we had a compacting GC -- the source of our 486 * troubles is small allocations filling in the gaps from larger ones.) 487 */ 488bool 489dvmHeapSourceStartupBeforeFork() 490{ 491 HeapSource *hs = gHs; // use a local to avoid the implicit "volatile" 492 493 HS_BOILERPLATE(); 494 495 assert(gDvm.zygote); 496 497 if (!gDvm.newZygoteHeapAllocated) { 498 /* Create a new heap for post-fork zygote allocations. We only 499 * try once, even if it fails. 500 */ 501 LOGI("Splitting out new zygote heap\n"); 502 gDvm.newZygoteHeapAllocated = true; 503 return addNewHeap(hs, NULL, 0); 504 } 505 return true; 506} 507 508/* 509 * Tears down the heap source and frees any resources associated with it. 510 */ 511void 512dvmHeapSourceShutdown(GcHeap *gcHeap) 513{ 514 if (gcHeap != NULL && gcHeap->heapSource != NULL) { 515 HeapSource *hs; 516 size_t numHeaps; 517 size_t i; 518 519 hs = gcHeap->heapSource; 520 gHs = NULL; 521 522 /* Cache numHeaps because hs will be invalid after the last 523 * heap is freed. 524 */ 525 numHeaps = hs->numHeaps; 526 527 for (i = 0; i < numHeaps; i++) { 528 Heap *heap = &hs->heaps[i]; 529 530 dvmHeapBitmapDelete(&heap->objectBitmap); 531 destroy_contiguous_mspace(heap->msp); 532 } 533 /* The last heap is the original one, which contains the 534 * HeapSource object itself. 535 */ 536 } 537} 538 539/* 540 * Returns the requested value. If the per-heap stats are requested, fill 541 * them as well. 542 * 543 * Caller must hold the heap lock. 544 */ 545size_t 546dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[], 547 size_t arrayLen) 548{ 549 HeapSource *hs = gHs; 550 size_t value = 0; 551 size_t total = 0; 552 size_t i; 553 554 HS_BOILERPLATE(); 555 556 switch (spec) { 557 case HS_EXTERNAL_BYTES_ALLOCATED: 558 return hs->externalBytesAllocated; 559 case HS_EXTERNAL_LIMIT: 560 return hs->externalLimit; 561 default: 562 // look at all heaps. 
563 ; 564 } 565 566 assert(arrayLen >= hs->numHeaps || perHeapStats == NULL); 567 for (i = 0; i < hs->numHeaps; i++) { 568 Heap *const heap = &hs->heaps[i]; 569 570 switch (spec) { 571 case HS_FOOTPRINT: 572 value = mspace_footprint(heap->msp); 573 break; 574 case HS_ALLOWED_FOOTPRINT: 575 value = mspace_max_allowed_footprint(heap->msp); 576 break; 577 case HS_BYTES_ALLOCATED: 578 value = heap->bytesAllocated; 579 break; 580 case HS_OBJECTS_ALLOCATED: 581 value = heap->objectsAllocated; 582 break; 583 default: 584 // quiet gcc 585 break; 586 } 587 if (perHeapStats) { 588 perHeapStats[i] = value; 589 } 590 total += value; 591 } 592 return total; 593} 594 595/* 596 * Writes shallow copies of the currently-used bitmaps into outBitmaps, 597 * returning the number of bitmaps written. Returns <0 if the array 598 * was not long enough. 599 */ 600ssize_t 601dvmHeapSourceGetObjectBitmaps(HeapBitmap outBitmaps[], size_t maxBitmaps) 602{ 603 HeapSource *hs = gHs; 604 605 HS_BOILERPLATE(); 606 607 if (maxBitmaps >= hs->numHeaps) { 608 size_t i; 609 610 for (i = 0; i < hs->numHeaps; i++) { 611 outBitmaps[i] = hs->heaps[i].objectBitmap; 612 } 613 return i; 614 } 615 return -1; 616} 617 618/* 619 * Replaces the object location HeapBitmaps with the elements of 620 * <objectBitmaps>. The elements of <objectBitmaps> are overwritten 621 * with shallow copies of the old bitmaps. 622 * 623 * Returns false if the number of bitmaps doesn't match the number 624 * of heaps. 625 */ 626bool 627dvmHeapSourceReplaceObjectBitmaps(HeapBitmap objectBitmaps[], size_t nBitmaps) 628{ 629 HeapSource *hs = gHs; 630 size_t i; 631 632 HS_BOILERPLATE(); 633 634 if (nBitmaps != hs->numHeaps) { 635 return false; 636 } 637 638 for (i = 0; i < hs->numHeaps; i++) { 639 Heap *heap = &hs->heaps[i]; 640 HeapBitmap swap; 641 642 swap = heap->objectBitmap; 643 heap->objectBitmap = objectBitmaps[i]; 644 objectBitmaps[i] = swap; 645 } 646 return true; 647} 648 649/* 650 * Allocates <n> bytes of zeroed data. 
 */
void *
dvmHeapSourceAlloc(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Only allocate if doing so would not push the active heap's
     * (approximate) used-byte count past the soft limit.
     */
    if (heap->bytesAllocated + n <= hs->softLimit) {
// TODO: allocate large blocks (>64k?) as separate mmap regions so that
//       they don't increase the high-water mark when they're freed.
// TODO: zero out large objects using madvise
        /* mspace_calloc returns zeroed memory, or NULL if the mspace
         * itself is out of room (its allowed footprint is exhausted).
         */
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr != NULL) {
            countAllocation(heap, ptr, true);
        }
    } else {
        /* This allocation would push us over the soft limit;
         * act as if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
                FRACTIONAL_MB(hs->softLimit), n);
        ptr = NULL;
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 *
 * NOTE(review): this still goes through dvmHeapSourceAlloc(), so a
 * soft limit can still cause failure; the caller is expected to have
 * lifted the soft limit first (see dvmHeapSourceAllocAndGrow()).
 */
static void *
heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    void *ptr;
    size_t max;

    /* Grow as much as possible, but don't let the real footprint
     * plus external allocations go over the absolute max.
     */
    max = heap->absoluteMaxSize;
    if (max > hs->externalBytesAllocated) {
        max -= hs->externalBytesAllocated;

        mspace_set_max_allowed_footprint(heap->msp, max);
        ptr = dvmHeapSourceAlloc(n);

        /* Shrink back down as small as possible.  Our caller may
         * readjust max_allowed to a more appropriate value.
         */
        mspace_set_max_allowed_footprint(heap->msp,
                mspace_footprint(heap->msp));
    } else {
        /* External allocations have already consumed all of the
         * room this heap could ever use; nothing to grow into.
         */
        ptr = NULL;
    }

    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void *
dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;
    size_t oldIdealSize;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* First try the cheap path: a normal soft-limited allocation. */
    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    oldIdealSize = hs->idealSize;
    if (softLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = INT_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at INT_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the memory pointed to by <ptr>, which may be NULL.
 */
void
dvmHeapSourceFree(void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        /* Always update the statistics and bitmap, even for old heaps. */
        countFree(heap, ptr, true);
        /* Only free objects that are in the active heap.
         * Touching old heaps would pull pages into this process.
         */
        if (heap == gHs->heaps) {
            mspace_free(heap->msp, ptr);
        }
    }
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
791 */ 792bool 793dvmHeapSourceContains(const void *ptr) 794{ 795 Heap *heap; 796 797 HS_BOILERPLATE(); 798 799 heap = ptr2heap(gHs, ptr); 800 if (heap != NULL) { 801 return dvmHeapBitmapIsObjectBitSet(&heap->objectBitmap, ptr) != 0; 802 } 803 return false; 804} 805 806/* 807 * Returns the value of the requested flag. 808 */ 809bool 810dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag) 811{ 812 if (ptr == NULL) { 813 return false; 814 } 815 816 if (flag == HS_CONTAINS) { 817 return dvmHeapSourceContains(ptr); 818 } else if (flag == HS_ALLOCATED_IN_ZYGOTE) { 819 HeapSource *hs = gHs; 820 821 HS_BOILERPLATE(); 822 823 if (hs->sawZygote) { 824 Heap *heap; 825 826 heap = ptr2heap(hs, ptr); 827 if (heap != NULL) { 828 /* If the object is not in the active heap, we assume that 829 * it was allocated as part of zygote. 830 */ 831 return heap != hs->heaps; 832 } 833 } 834 /* The pointer is outside of any known heap, or we are not 835 * running in zygote mode. 836 */ 837 return false; 838 } 839 840 return false; 841} 842 843/* 844 * Returns the number of usable bytes in an allocated chunk; the size 845 * may be larger than the size passed to dvmHeapSourceAlloc(). 846 */ 847size_t 848dvmHeapSourceChunkSize(const void *ptr) 849{ 850 Heap *heap; 851 852 HS_BOILERPLATE(); 853 854 heap = ptr2heap(gHs, ptr); 855 if (heap != NULL) { 856 return mspace_usable_size(heap->msp, ptr); 857 } 858 return 0; 859} 860 861/* 862 * Returns the number of bytes that the heap source has allocated 863 * from the system using sbrk/mmap, etc. 864 * 865 * Caller must hold the heap lock. 866 */ 867size_t 868dvmHeapSourceFootprint() 869{ 870 HS_BOILERPLATE(); 871 872//TODO: include size of bitmaps? 873 return oldHeapOverhead(gHs, true); 874} 875 876/* 877 * Return the real bytes used by old heaps and external memory 878 * plus the soft usage of the current heap. 
When a soft limit 879 * is in effect, this is effectively what it's compared against 880 * (though, in practice, it only looks at the current heap). 881 */ 882static size_t 883getSoftFootprint(bool includeActive) 884{ 885 HeapSource *hs = gHs; 886 size_t ret; 887 888 HS_BOILERPLATE(); 889 890 ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated; 891 if (includeActive) { 892 ret += hs->heaps[0].bytesAllocated; 893 } 894 895 return ret; 896} 897 898/* 899 * Gets the maximum number of bytes that the heap source is allowed 900 * to allocate from the system. 901 */ 902size_t 903dvmHeapSourceGetIdealFootprint() 904{ 905 HeapSource *hs = gHs; 906 907 HS_BOILERPLATE(); 908 909 return hs->idealSize; 910} 911 912/* 913 * Sets the soft limit, handling any necessary changes to the allowed 914 * footprint of the active heap. 915 */ 916static void 917setSoftLimit(HeapSource *hs, size_t softLimit) 918{ 919 /* Compare against the actual footprint, rather than the 920 * max_allowed, because the heap may not have grown all the 921 * way to the allowed size yet. 922 */ 923 mspace *msp = hs->heaps[0].msp; 924 size_t currentHeapSize = mspace_footprint(msp); 925 if (softLimit < currentHeapSize) { 926 /* Don't let the heap grow any more, and impose a soft limit. 927 */ 928 mspace_set_max_allowed_footprint(msp, currentHeapSize); 929 hs->softLimit = softLimit; 930 } else { 931 /* Let the heap grow to the requested max, and remove any 932 * soft limit, if set. 933 */ 934 mspace_set_max_allowed_footprint(msp, softLimit); 935 hs->softLimit = INT_MAX; 936 } 937} 938 939/* 940 * Sets the maximum number of bytes that the heap source is allowed 941 * to allocate from the system. Clamps to the appropriate maximum 942 * value. 
 */
static void
setIdealFootprint(size_t max)
{
    HeapSource *hs = gHs;
#if DEBUG_HEAP_SOURCE
    HeapSource oldHs = *hs;
    mspace *msp = hs->heaps[0].msp;
    size_t oldAllowedFootprint =
            mspace_max_allowed_footprint(msp);
#endif

    HS_BOILERPLATE();

    /* Clamp to [minimumSize, absoluteMaxSize]. */
    if (max > hs->absoluteMaxSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB\n",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(hs->absoluteMaxSize));
        max = hs->absoluteMaxSize;
    } else if (max < hs->minimumSize) {
        max = hs->minimumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps and external allocations will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        /* Old heaps + external bytes already exceed the target; the
         * active heap gets no soft headroom at all.
         */
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;

    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
            "ext %zd\n",
            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
            mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
            hs->externalBytesAllocated);

}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void
snapIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;
    size_t newUtilization;   /* NOTE(review): unused */

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    /* Store as a fixed-point ratio out of HEAP_UTILIZATION_MAX. */
    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)\n",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * If set is true, sets the new minimum heap size to size; always
 * returns the current (or previous) size.  If size is negative,
 * removes the current minimum constraint (if present).
 *
 * NOTE(review): size is a size_t, so it can never actually be
 * negative; passing 0 is what removes the constraint in practice.
 */
size_t
dvmMinimumHeapSize(size_t size, bool set)
{
    HeapSource *hs = gHs;
    size_t oldMinimumSize;

    /* gHs caches an entry in gDvm.gcHeap; we need to hold the
     * heap lock if we're going to look at it.  We also need the
     * lock for the call to setIdealFootprint().
     */
    dvmLockHeap();

    HS_BOILERPLATE();

    oldMinimumSize = hs->minimumSize;

    if (set) {
        /* Don't worry about external allocations right now.
         * setIdealFootprint() will take them into account when
         * minimumSize is used, and it's better to hold onto the
         * intended minimumSize than to clamp it arbitrarily based
         * on the current allocations.
         */
        if (size > hs->absoluteMaxSize) {
            size = hs->absoluteMaxSize;
        }
        hs->minimumSize = size;
        if (size > hs->idealSize) {
            /* Force a snap to the minimum value, which we just set
             * and which setIdealFootprint() will take into consideration.
             */
            setIdealFootprint(hs->idealSize);
        }
        /* Otherwise we'll just keep it in mind the next time
         * setIdealFootprint() is called.
         */
    }

    dvmUnlockHeap();

    return oldMinimumSize;
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t
getUtilizationTarget(const HeapSource *hs,
        size_t liveSize, size_t targetUtilization)
{
    size_t targetSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HeapSource *hs = gHs;
    Heap *heap;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;
    size_t newHeapMax;
    size_t overhead;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* This is a hack to deal with the side-effects of moving
     * bitmap data out of the Dalvik heap.  Since the amount
     * of free space after a GC scales with the size of the
     * live set, many apps expected the large free space that
     * appeared along with megabytes' worth of bitmaps.  When
     * the bitmaps were removed, the free size shrank significantly,
     * and apps started GCing constantly.  This makes it so the
     * post-GC free space is the same size it would have been
     * if the bitmaps were still in the Dalvik heap.
     */
    currentHeapUsed += hs->externalBytesAllocated;
#endif
    targetHeapSize =
            getUtilizationTarget(hs, currentHeapUsed, hs->targetUtilization);
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* Undo the temporary inflation above; the external bytes only
     * influenced the free-space computation, not the heap itself.
     */
    currentHeapUsed -= hs->externalBytesAllocated;
    targetHeapSize -= hs->externalBytesAllocated;
#endif

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (softLimited(hs)) {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated),
                FRACTIONAL_MB(newHeapMax));
    } else {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated));
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */

/* Callback for mspace_walk_free_pages()/dlmalloc_walk_free_pages();
 * madvises whole pages within [start, end) back to the kernel and
 * accumulates the released byte count into *(size_t *)nbytes.
 */
static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}

/*
 * Return unused memory to the system if possible.
1223 */ 1224void 1225dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen) 1226{ 1227 HeapSource *hs = gHs; 1228 size_t nativeBytes, heapBytes; 1229 size_t i; 1230 1231 HS_BOILERPLATE(); 1232 1233 assert(arrayLen >= hs->numHeaps); 1234 1235 heapBytes = 0; 1236 for (i = 0; i < hs->numHeaps; i++) { 1237 Heap *heap = &hs->heaps[i]; 1238 1239 /* Return the wilderness chunk to the system. 1240 */ 1241 mspace_trim(heap->msp, 0); 1242 1243 /* Return any whole free pages to the system. 1244 */ 1245 bytesTrimmed[i] = 0; 1246 mspace_walk_free_pages(heap->msp, releasePagesInRange, 1247 &bytesTrimmed[i]); 1248 heapBytes += bytesTrimmed[i]; 1249 } 1250 1251 /* Same for the native heap. 1252 */ 1253 dlmalloc_trim(0); 1254 nativeBytes = 0; 1255 dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes); 1256 1257 LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n", 1258 heapBytes, nativeBytes, heapBytes + nativeBytes); 1259} 1260 1261/* 1262 * Walks over the heap source and passes every allocated and 1263 * free chunk to the callback. 1264 */ 1265void 1266dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen, 1267 const void *userptr, size_t userlen, 1268 void *arg), 1269 void *arg) 1270{ 1271 HeapSource *hs = gHs; 1272 size_t i; 1273 1274 HS_BOILERPLATE(); 1275 1276 /* Walk the heaps from oldest to newest. 1277 */ 1278//TODO: do this in address order 1279 for (i = hs->numHeaps; i > 0; --i) { 1280 mspace_walk_heap(hs->heaps[i-1].msp, callback, arg); 1281 } 1282} 1283 1284/* 1285 * Gets the number of heaps available in the heap source. 1286 * 1287 * Caller must hold the heap lock, because gHs caches a field 1288 * in gDvm.gcHeap. 1289 */ 1290size_t 1291dvmHeapSourceGetNumHeaps() 1292{ 1293 HeapSource *hs = gHs; 1294 1295 HS_BOILERPLATE(); 1296 1297 return hs->numHeaps; 1298} 1299 1300 1301/* 1302 * External allocation tracking 1303 * 1304 * In some situations, memory outside of the heap is tied to the 1305 * lifetime of objects in the heap. 
Since that memory is kept alive 1306 * by heap objects, it should provide memory pressure that can influence 1307 * GCs. 1308 */ 1309 1310 1311static bool 1312externalAllocPossible(const HeapSource *hs, size_t n) 1313{ 1314 const Heap *heap; 1315 size_t currentHeapSize; 1316 1317 /* Make sure that this allocation is even possible. 1318 * Don't let the external size plus the actual heap size 1319 * go over the absolute max. This essentially treats 1320 * external allocations as part of the active heap. 1321 * 1322 * Note that this will fail "mysteriously" if there's 1323 * a small softLimit but a large heap footprint. 1324 */ 1325 heap = hs2heap(hs); 1326 currentHeapSize = mspace_max_allowed_footprint(heap->msp); 1327 if (currentHeapSize + hs->externalBytesAllocated + n <= 1328 heap->absoluteMaxSize) 1329 { 1330 return true; 1331 } 1332 HSTRACE("externalAllocPossible(): " 1333 "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n", 1334 currentHeapSize, hs->externalBytesAllocated, n, 1335 heap->absoluteMaxSize, 1336 heap->absoluteMaxSize - 1337 (currentHeapSize + hs->externalBytesAllocated)); 1338 return false; 1339} 1340 1341#define EXTERNAL_TARGET_UTILIZATION 820 // 80% 1342 1343/* 1344 * Tries to update the internal count of externally-allocated memory. 1345 * If there's enough room for that memory, returns true. If not, returns 1346 * false and does not update the count. 1347 * 1348 * The caller must ensure externalAllocPossible(hs, n) == true. 1349 */ 1350static bool 1351externalAlloc(HeapSource *hs, size_t n, bool grow) 1352{ 1353 Heap *heap; 1354 size_t currentHeapSize; 1355 size_t newTotal; 1356 size_t max; 1357 bool grew; 1358 1359 assert(hs->externalLimit >= hs->externalBytesAllocated); 1360 1361 HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : ""); 1362 assert(externalAllocPossible(hs, n)); // The caller must ensure this. 
1363 1364 /* External allocations have their own "free space" that they 1365 * can allocate from without causing a GC. 1366 */ 1367 if (hs->externalBytesAllocated + n <= hs->externalLimit) { 1368 hs->externalBytesAllocated += n; 1369#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS 1370 if (gDvm.allocProf.enabled) { 1371 Thread* self = dvmThreadSelf(); 1372 gDvm.allocProf.externalAllocCount++; 1373 gDvm.allocProf.externalAllocSize += n; 1374 if (self != NULL) { 1375 self->allocProf.externalAllocCount++; 1376 self->allocProf.externalAllocSize += n; 1377 } 1378 } 1379#endif 1380 return true; 1381 } 1382 if (!grow) { 1383 return false; 1384 } 1385 1386 /* GROW */ 1387 hs->externalBytesAllocated += n; 1388 hs->externalLimit = getUtilizationTarget(hs, 1389 hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION); 1390 HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit); 1391 return true; 1392} 1393 1394static void 1395gcForExternalAlloc(bool collectSoftReferences) 1396{ 1397#ifdef WITH_PROFILER // even if !PROFILE_EXTERNAL_ALLOCATIONS 1398 if (gDvm.allocProf.enabled) { 1399 Thread* self = dvmThreadSelf(); 1400 gDvm.allocProf.gcCount++; 1401 if (self != NULL) { 1402 self->allocProf.gcCount++; 1403 } 1404 } 1405#endif 1406 dvmCollectGarbageInternal(collectSoftReferences); 1407} 1408 1409/* 1410 * Updates the internal count of externally-allocated memory. If there's 1411 * enough room for that memory, returns true. If not, returns false and 1412 * does not update the count. 1413 * 1414 * May cause a GC as a side-effect. 1415 */ 1416bool 1417dvmTrackExternalAllocation(size_t n) 1418{ 1419 HeapSource *hs = gHs; 1420 size_t overhead; 1421 bool ret = false; 1422 1423 /* gHs caches an entry in gDvm.gcHeap; we need to hold the 1424 * heap lock if we're going to look at it. 
1425 */ 1426 dvmLockHeap(); 1427 1428 HS_BOILERPLATE(); 1429 assert(hs->externalLimit >= hs->externalBytesAllocated); 1430 1431 if (!externalAllocPossible(hs, n)) { 1432 LOGE_HEAP("%zd-byte external allocation " 1433 "too large for this process.\n", n); 1434 goto out; 1435 } 1436 1437 /* Try "allocating" using the existing "free space". 1438 */ 1439 HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n", 1440 n, hs->externalBytesAllocated, hs->externalLimit); 1441 if (externalAlloc(hs, n, false)) { 1442 ret = true; 1443 goto out; 1444 } 1445 1446 /* The "allocation" failed. Free up some space by doing 1447 * a full garbage collection. This may grow the heap source 1448 * if the live set is sufficiently large. 1449 */ 1450 HSTRACE("EXTERNAL alloc %zd: GC 1\n", n); 1451 gcForExternalAlloc(false); // don't collect SoftReferences 1452 if (externalAlloc(hs, n, false)) { 1453 ret = true; 1454 goto out; 1455 } 1456 1457 /* Even that didn't work; this is an exceptional state. 1458 * Try harder, growing the heap source if necessary. 1459 */ 1460 HSTRACE("EXTERNAL alloc %zd: frag\n", n); 1461 ret = externalAlloc(hs, n, true); 1462 dvmHeapSizeChanged(); 1463 if (ret) { 1464 goto out; 1465 } 1466 1467 /* We couldn't even grow enough to satisfy the request. 1468 * Try one last GC, collecting SoftReferences this time. 
1469 */ 1470 HSTRACE("EXTERNAL alloc %zd: GC 2\n", n); 1471 gcForExternalAlloc(true); // collect SoftReferences 1472 ret = externalAlloc(hs, n, true); 1473 dvmHeapSizeChanged(); 1474 if (!ret) { 1475 LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n); 1476 } 1477 1478#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS 1479 if (gDvm.allocProf.enabled) { 1480 Thread* self = dvmThreadSelf(); 1481 gDvm.allocProf.failedExternalAllocCount++; 1482 gDvm.allocProf.failedExternalAllocSize += n; 1483 if (self != NULL) { 1484 self->allocProf.failedExternalAllocCount++; 1485 self->allocProf.failedExternalAllocSize += n; 1486 } 1487 } 1488#endif 1489 1490out: 1491 dvmUnlockHeap(); 1492 1493 return ret; 1494} 1495 1496/* 1497 * Reduces the internal count of externally-allocated memory. 1498 */ 1499void 1500dvmTrackExternalFree(size_t n) 1501{ 1502 HeapSource *hs = gHs; 1503 size_t newIdealSize; 1504 size_t newExternalLimit; 1505 size_t oldExternalBytesAllocated; 1506 1507 HSTRACE("EXTERNAL free %zu (%zu < %zu)\n", 1508 n, hs->externalBytesAllocated, hs->externalLimit); 1509 1510 /* gHs caches an entry in gDvm.gcHeap; we need to hold the 1511 * heap lock if we're going to look at it. 1512 */ 1513 dvmLockHeap(); 1514 1515 HS_BOILERPLATE(); 1516 assert(hs->externalLimit >= hs->externalBytesAllocated); 1517 1518 oldExternalBytesAllocated = hs->externalBytesAllocated; 1519 if (n <= hs->externalBytesAllocated) { 1520 hs->externalBytesAllocated -= n; 1521 } else { 1522 n = hs->externalBytesAllocated; 1523 hs->externalBytesAllocated = 0; 1524 } 1525 1526#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS 1527 if (gDvm.allocProf.enabled) { 1528 Thread* self = dvmThreadSelf(); 1529 gDvm.allocProf.externalFreeCount++; 1530 gDvm.allocProf.externalFreeSize += n; 1531 if (self != NULL) { 1532 self->allocProf.externalFreeCount++; 1533 self->allocProf.externalFreeSize += n; 1534 } 1535 } 1536#endif 1537 1538 /* Shrink as quickly as we can. 
1539 */ 1540 newExternalLimit = getUtilizationTarget(hs, 1541 hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION); 1542 if (newExternalLimit < oldExternalBytesAllocated) { 1543 /* Make sure that the remaining free space is at least 1544 * big enough to allocate something of the size that was 1545 * just freed. This makes it more likely that 1546 * externalFree(N); externalAlloc(N); 1547 * will work without causing a GC. 1548 */ 1549 HSTRACE("EXTERNAL free preserved %zu extra free bytes\n", 1550 oldExternalBytesAllocated - newExternalLimit); 1551 newExternalLimit = oldExternalBytesAllocated; 1552 } 1553 if (newExternalLimit < hs->externalLimit) { 1554 hs->externalLimit = newExternalLimit; 1555 } 1556 1557 dvmUnlockHeap(); 1558} 1559 1560/* 1561 * Returns the number of externally-allocated bytes being tracked by 1562 * dvmTrackExternalAllocation/Free(). 1563 */ 1564size_t 1565dvmGetExternalBytesAllocated() 1566{ 1567 const HeapSource *hs = gHs; 1568 size_t ret; 1569 1570 /* gHs caches an entry in gDvm.gcHeap; we need to hold the 1571 * heap lock if we're going to look at it. We also need the 1572 * lock for the call to setIdealFootprint(). 1573 */ 1574 dvmLockHeap(); 1575 HS_BOILERPLATE(); 1576 ret = hs->externalBytesAllocated; 1577 dvmUnlockHeap(); 1578 1579 return ret; 1580} 1581