Lines Matching refs:page

40 // Two partition pages are used as guard / metadata page so make sure the super
41 // page size is bigger.
45 // of metadata in the middle of a guard partition page.
66 // allocator, and the current values of partition page size and constants.
86 // Leaving a page unfaulted is not free; the page will occupy an empty page table entry.
108 // We mark the seed page as free to make sure it is skipped by our
109 // logic to find a new active page.
240 PartitionPage* page = bucket->activePagesHead;
241 while (page) {
242 if (page->numAllocatedSlots)
244 page = page->nextPage;
256 // to free all our super pages. We first collect the super page pointers
334 // In this case, we can still hand out pages from the current super page
341 // Need a new super page.
356 // Make the first partition page in the super page a guard page, but leave a
358 // This is where we put page metadata and also a tiny amount of extent
362 // Also make the last partition page a guard page.
371 // We allocated a new super page so update super page metadata.
400 static ALWAYS_INLINE void partitionUnusePage(PartitionRootBase* root, PartitionPage* page)
402 ASSERT(page->bucket->numSystemPagesPerSlotSpan);
403 void* addr = partitionPageToPointer(page);
404 partitionDecommitSystemPages(root, addr, page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
417 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucket* bucket)
419 ASSERT(page != &PartitionRootGeneric::gSeedPage);
420 page->numAllocatedSlots = 0;
421 page->numUnprovisionedSlots = partitionBucketSlots(bucket);
422 ASSERT(page->numUnprovisionedSlots);
423 page->bucket = bucket;
424 page->nextPage = 0;
426 page->freelistHead = 0;
427 page->pageOffset = 0;
428 page->freeCacheIndex = -1;
431 char* pageCharPtr = reinterpret_cast<char*>(page);
439 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page)
441 ASSERT(page != &PartitionRootGeneric::gSeedPage);
442 size_t numSlots = page->numUnprovisionedSlots;
444 PartitionBucket* bucket = page->bucket;
447 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket));
449 ASSERT(!page->freelistHead);
450 ASSERT(page->numAllocatedSlots >= 0);
453 char* base = reinterpret_cast<char*>(partitionPageToPointer(page));
454 char* returnObject = base + (size * page->numAllocatedSlots);
458 // page containing the "end" of the returned slot, and then allow freelist
459 // pointers to be written up to the end of that page.
461 char* slotsLimit = returnObject + (size * page->numUnprovisionedSlots);
478 // We do not necessarily create any new freelist entries, because we cross sub page boundaries frequently for large bucket sizes.
481 page->numUnprovisionedSlots = numSlots;
482 page->numAllocatedSlots++;
487 page->freelistHead = entry;
496 page->freelistHead = 0;
501 // This helper function scans the active page list for a suitable new active
502 // page, starting at the passed in page.
503 // When it finds a suitable new active page (one that has free slots), it is
504 // set as the new active page and true is returned. If there is no suitable new
505 // active page, false is returned and the current active page is set to null.
507 // Freed pages are swept on to the free page list and full pages are unlinked
509 static ALWAYS_INLINE bool partitionSetNewActivePage(PartitionPage* page)
511 if (page == &PartitionRootBase::gSeedPage) {
512 ASSERT(!page->nextPage);
517 PartitionBucket* bucket = page->bucket;
519 for (; page; page = nextPage) {
520 nextPage = page->nextPage;
521 ASSERT(page->bucket == bucket);
522 ASSERT(page != bucket->freePagesHead);
523 ASSERT(!bucket->freePagesHead || page != bucket->freePagesHead->nextPage);
527 if (LIKELY(page->freelistHead != 0) || LIKELY(page->numUnprovisionedSlots)) {
528 bucket->activePagesHead = page;
532 ASSERT(page->numAllocatedSlots >= 0);
533 if (LIKELY(page->numAllocatedSlots == 0)) {
534 ASSERT(page->freeCacheIndex == -1);
535 // We hit a free page, so shepherd it on to the free page list.
536 page->nextPage = bucket->freePagesHead;
537 bucket->freePagesHead = page;
539 // If we get here, we found a full page. Skip over it too, and also
541 // free'ing can tell, and move it back into the active page list.
542 ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket)));
543 page->numAllocatedSlots = -page->numAllocatedSlots;
549 page->nextPage = 0;
561 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(PartitionPage* page)
563 ASSERT(partitionBucketIsDirectMapped(page->bucket));
564 return reinterpret_cast<PartitionDirectMapExtent*>(reinterpret_cast<char*>(page) + 2 * kPageMetadataSize);
571 // Because we need to fake looking like a super page, we need to allocate
573 // - The first few system pages are the partition page in which the super
574 // page metadata is stored. We fault just one system page out of a partition
575 // page sized clump.
576 // - We add a trailing guard page.
585 // and page table overhead.
588 // TODO: consider pre-populating page tables (e.g. MAP_POPULATE on Linux,
600 // We could get it down to three by using read-only for the metadata page,
601 // or perhaps two by leaving out the trailing guard page on 64-bit.
608 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ret);
609 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(reinterpret_cast<char*>(page) + kPageMetadataSize);
610 page->freelistHead = 0;
611 page->nextPage = 0;
612 page->bucket = bucket;
613 page->numAllocatedSlots = 1;
614 page->numUnprovisionedSlots = 0;
615 page->pageOffset = 0;
616 page->freeCacheIndex = 0;
624 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page);
630 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page)
632 size_t unmapSize = partitionPageToDirectMapExtent(page)->mapSize;
634 // Add on the size of the trailing guard page and preceding partition
635 // page.
640 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
641 // Account for the mapping starting a partition page before the actual
669 // First, look for a usable page in the existing active pages list.
670 // Change active page, accepting the current page as a candidate.
695 // Third. If we get here, we need a brand new page.
702 // Skip the alignment check because it depends on page->bucket, which is not yet set.
711 static ALWAYS_INLINE void partitionFreePage(PartitionRootBase* root, PartitionPage* page)
713 ASSERT(page->freelistHead);
714 ASSERT(!page->numAllocatedSlots);
715 partitionUnusePage(root, page);
716 // We actually leave the freed page in the active list. We'll sweep it on
717 // to the free page list when we next walk the active page list. Pulling
718 // this trick enables us to use a singly-linked page list for all cases,
719 // which is critical in keeping the page metadata structure down to 32
721 page->freelistHead = 0;
722 page->numUnprovisionedSlots = 0;
725 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page)
727 PartitionRootBase* root = partitionPageToRoot(page);
729 // If the page is already registered as empty, give it another life.
730 if (page->freeCacheIndex != -1) {
731 ASSERT(page->freeCacheIndex >= 0);
732 ASSERT(static_cast<unsigned>(page->freeCacheIndex) < kMaxFreeableSpans);
733 ASSERT(root->globalEmptyPageRing[page->freeCacheIndex] == page);
734 root->globalEmptyPageRing[page->freeCacheIndex] = 0;
739 // The page might well have been re-activated, filled up, etc. before we get
747 // The page is still empty, and not freed, so _really_ free it.
757 root->globalEmptyPageRing[currentIndex] = page;
758 page->freeCacheIndex = currentIndex;
765 void partitionFreeSlowPath(PartitionPage* page)
767 PartitionBucket* bucket = page->bucket;
768 ASSERT(page != &PartitionRootGeneric::gSeedPage);
770 if (LIKELY(page->numAllocatedSlots == 0)) {
773 partitionDirectUnmap(page);
776 // If it's the current active page, attempt to change it. We'd prefer to leave
777 // the page empty as a gentle force towards defragmentation.
778 if (LIKELY(page == bucket->activePagesHead) && page->nextPage) {
779 if (partitionSetNewActivePage(page->nextPage)) {
780 ASSERT(bucket->activePagesHead != page);
781 // Link the empty page back in after the new current page, to
783 // TODO: consider walking the list to link the empty page after
786 page->nextPage = currentPage->nextPage;
787 currentPage->nextPage = page;
789 bucket->activePagesHead = page;
790 page->nextPage = 0;
793 partitionRegisterEmptyPage(page);
795 // Ensure that the page is full. That's the only valid case if we
797 ASSERT(page->numAllocatedSlots < 0);
800 RELEASE_ASSERT(page->numAllocatedSlots != -1);
801 page->numAllocatedSlots = -page->numAllocatedSlots - 2;
802 ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket) - 1));
803 // Fully used page became partially used. It must be put back on the
804 // non-full page list. Also make it the current page to increase the
805 // chances of it being filled up again. The old current page will be
806 // the next page.
807 page->nextPage = bucket->activePagesHead;
808 bucket->activePagesHead = page;
810 // Special case: for a partition page with just a single slot, it may
812 if (UNLIKELY(page->numAllocatedSlots == 0))
813 partitionFreeSlowPath(page);
817 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, size_t newSize)
819 ASSERT(partitionBucketIsDirectMapped(page->bucket));
830 size_t currentSize = page->bucket->slotSize;
834 char* charPtr = static_cast<char*>(partitionPageToPointer(page));
837 size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize;
848 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) {
869 page->bucket->slotSize = newSize;
889 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
891 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) {
895 if (partitionReallocDirectMappedInPlace(root, page, newSize))
953 const PartitionPage* page = bucket.activePagesHead;
954 while (page) {
955 ASSERT(page != &PartitionRootGeneric::gSeedPage);
956 // A page may be on the active list but freed and not yet swept.
957 if (!page->freelistHead && !page->numUnprovisionedSlots && !page->numAllocatedSlots) {
961 numActiveBytes += (page->numAllocatedSlots * bucketSlotSize);
962 size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * bucketSlotSize;
963 // Round up to system page size.
966 if (!page->numAllocatedSlots)
969 page = page->nextPage;