Lines Matching defs:Page

37 // Page::kMaxHeapObjectSize, so that they do not have to move during
79 DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
85 DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
88 DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
701 // Page* p = Page::FromAddress(addr); or
702 // Page* p = Page::FromAllocationTop(top);
703 class Page : public MemoryChunk {
709 INLINE(static Page* FromAddress(Address a)) {
710 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
717 INLINE(static Page* FromAllocationTop(Address top)) {
718 Page* p = FromAddress(top - kPointerSize);
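
The two accessors at lines 709-718 rely on the page-alignment masking trick called out in the comments at lines 701-702: clearing the low alignment bits of any interior address yields the start of its page, where the Page header lives, and FromAllocationTop first backs up one pointer width so an allocation top sitting exactly on a page boundary still resolves to the page it just finished filling. A minimal standalone sketch, assuming a power-of-two page size and stand-in types rather than the real Page/MemoryChunk declarations:

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kPageSize = 1 << 20;                 // assumed power-of-two size
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;  // low bits = in-page offset

    struct FakePage {};  // stand-in for the Page header at the start of each page

    FakePage* FromAddress(uintptr_t a) {
      // Clearing the alignment bits rounds any interior address down to the
      // page-aligned start, where the header lives.
      return reinterpret_cast<FakePage*>(a & ~kPageAlignmentMask);
    }

    FakePage* FromAllocationTop(uintptr_t top) {
      // A top pointer may sit exactly on the next page boundary; backing up one
      // pointer width keeps the lookup inside the page that was just filled.
      return FromAddress(top - sizeof(void*));
    }

    int main() {
      uintptr_t page_start = 0x40000000;       // page-aligned for the example
      uintptr_t obj = page_start + 0x1234;     // an object somewhere inside the page
      uintptr_t top = page_start + kPageSize;  // top exactly at the boundary
      std::printf("%p\n%p\n", (void*)FromAddress(obj), (void*)FromAllocationTop(top));
      // Both print the page start.
      return 0;
    }
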
723 inline Page* next_page();
724 inline Page* prev_page();
725 inline void set_next_page(Page* page);
726 inline void set_prev_page(Page* page);
747 // Page size in bytes. This must be a multiple of the OS page size.
756 // Page size mask.
761 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
793 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
910 DCHECK(size >= static_cast<size_t>(Page::kPageSize));
915 DCHECK(size >= static_cast<size_t>(Page::kPageSize));
965 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
969 Page* page = Page::FromAddress(addr);
982 static const int kSize = Page::kPageSize / kRegionSize;
984 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
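
Lines 965-984 split each page into fixed-size regions: the in-page offset of an address, shifted right by the region-size log, is the index of its region, kSize at line 982 is the number of regions per page, and the assert at line 984 keeps that division exact. A hedged sketch of the arithmetic, with illustrative stand-in constants (the real values are not shown in this listing):

    #include <cstdint>

    constexpr uintptr_t kPageSize = 1 << 20;                 // assumed page size
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
    constexpr int kRegionSizeLog2 = 13;                      // assume 8 KB regions
    constexpr int kRegionSize = 1 << kRegionSizeLog2;
    constexpr int kRegionsPerPage = kPageSize / kRegionSize; // plays the role of kSize
    static_assert(kPageSize % kRegionSize == 0, "regions must tile the page exactly");

    int RegionNumber(uintptr_t addr) {
      // In-page offset, divided by the region size via a shift.
      return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
    }
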
1007 Page* AllocatePage(intptr_t size, PagedSpace* owner,
1032 return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
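
Line 1032 turns the remaining byte budget into a conservative object-area estimate: round down to whole pages, then count only the usable object area of each page, since the per-page header consumes the difference between kPageSize and kMaxRegularHeapObjectSize. A small sketch of that computation, with assumed constants:

    #include <cstdint>

    // Illustrative stand-ins; the real Page constants are not part of this listing.
    constexpr intptr_t kPageSize = 1 << 20;
    constexpr intptr_t kMaxRegularHeapObjectSize = kPageSize - 4 * 1024;  // assumed overhead

    intptr_t MaxAvailable(intptr_t available_bytes) {
      // Whole pages only, and each page contributes only its object area.
      return (available_bytes / kPageSize) * kMaxRegularHeapObjectSize;
    }
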
1145 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1189 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1234 inline Page* next();
1238 Page* prev_page_; // Previous page returned.
1241 Page* next_page_;
1247 // Page::next_page() call.
1286 return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
1447 intptr_t EvictFreeListItemsInList(Page* p);
1448 bool ContainsPageFreeListItemsInList(Page* p);
1569 intptr_t EvictFreeListItems(Page* p);
1570 bool ContainsPageFreeListItems(Page* p);
1580 static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
1707 void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
1778 Page::FromAddress(top) == Page::FromAddress(limit - 1));
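
Lines 1286 and 1778 both verify that an allocation region [top, limit) lies within a single page. Because limit is exclusive, a region ending exactly on a page boundary has its limit already sitting on the next page; line 1778 therefore compares the page of limit - 1, the last byte actually covered, while line 1286 gets the same effect from FromAllocationTop's pointer-width back-off. A sketch of the limit - 1 variant, with assumed constants:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kPageSize = 1 << 20;  // assumed power-of-two page size
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

    uintptr_t PageStart(uintptr_t a) { return a & ~kPageAlignmentMask; }

    bool SamePage(uintptr_t top, uintptr_t limit) {
      // limit is exclusive, so the last byte of [top, limit) is limit - 1.
      // Comparing limit itself would misclassify a region that ends exactly on
      // a page boundary, because limit is then the start of the next page.
      return PageStart(top) == PageStart(limit - 1);
    }

    int main() {
      uintptr_t page = 0x40000000;
      assert(SamePage(page + 16, page + kPageSize));        // ends on the boundary: ok
      assert(!SamePage(page + 16, page + kPageSize + 32));  // spills into the next page
    }
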
1798 void ReleasePage(Page* page);
1801 Page* anchor() { return &anchor_; }
1827 static bool ShouldBeSweptBySweeperThreads(Page* p) {
1829 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
1834 void IncreaseUnsweptFreeBytes(Page* p) {
1841 void DecreaseUnsweptFreeBytes(Page* p) {
1854 void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
1856 Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
1858 Page* FirstPage() { return anchor_.next_page(); }
1859 Page* LastPage() { return anchor_.prev_page(); }
1891 Page anchor_;
1906 Page* end_of_unswept_pages_;
1987 static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
2006 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
2011 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2020 ~Page::kPageAlignmentMask);
2369 return (to_space_.TotalCapacity() / Page::kPageSize) *
2639 static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2704 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2706 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2707 // A large object always starts at Page::kObjectStartOffset into a page.
2722 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2723 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
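
Lines 2722-2723 express the padding described at line 2706 as arithmetic: a large-object chunk gives up one full page plus the object-start offset as overhead, and a chunk no bigger than that overhead holds no object area at all. A worked sketch with stand-in constants (the real kObjectStartOffset value is not shown in this listing):

    #include <cstdint>

    // Illustrative stand-ins for Page::kPageSize and Page::kObjectStartOffset.
    constexpr intptr_t kPageSize = 1 << 20;
    constexpr intptr_t kObjectStartOffset = 256;  // assumed, for illustration only

    intptr_t ObjectSizeFor(intptr_t chunk_size) {
      // Chunks that cannot cover the fixed overhead hold no object area.
      if (chunk_size <= kPageSize + kObjectStartOffset) return 0;
      return chunk_size - kPageSize - kObjectStartOffset;
    }

    // Example: a 3-page chunk leaves 2 * kPageSize - kObjectStartOffset bytes of object area.
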