Lines Matching defs:Page

56 // Page::kMaxHeapObjectSize, so that they do not have to move during
98 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
107 ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
110 ASSERT((Page::kObjectStartOffset <= offset) \
111 && (offset <= Page::kPageSize))
646 // Page* p = Page::FromAddress(addr); or
647 // Page* p = Page::FromAllocationTop(top);
648 class Page : public MemoryChunk {
654 INLINE(static Page* FromAddress(Address a)) {
655 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
662 INLINE(static Page* FromAllocationTop(Address top)) {
663 Page* p = FromAddress(top - kPointerSize);
668 inline Page* next_page();
669 inline Page* prev_page();
670 inline void set_next_page(Page* page);
671 inline void set_prev_page(Page* page);
692 // Page size in bytes. This must be a multiple of the OS page size.
701 // Page size mask.
706 static inline Page* Initialize(Heap* heap,
731 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
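
The FromAddress and FromAllocationTop entries above (source lines 654-663) rely on every page being aligned to the page size, so the owning page can be recovered from any interior address just by masking off the low-order bits. A minimal standalone sketch of that masking trick, assuming a 1 MB page for illustration; PageStartFromAddress and PageStartFromAllocationTop are made-up names, not V8 API:

    #include <cassert>
    #include <cstdint>

    // Illustrative constants; the real ones are Page::kPageSize and
    // Page::kPageAlignmentMask. A 1 MB page is assumed here.
    const uintptr_t kPageSizeBits = 20;
    const uintptr_t kPageSize = uintptr_t(1) << kPageSizeBits;
    const uintptr_t kPageAlignmentMask = kPageSize - 1;

    // Same trick as Page::FromAddress (source line 655): pages are
    // kPageSize-aligned, so masking the low-order bits of any interior
    // address yields the start of the page.
    uintptr_t PageStartFromAddress(uintptr_t addr) {
      return addr & ~kPageAlignmentMask;
    }

    // Same trick as Page::FromAllocationTop (source lines 662-663): an
    // allocation top may point one past the end of the page, so step back
    // one pointer before masking.
    uintptr_t PageStartFromAllocationTop(uintptr_t top) {
      return PageStartFromAddress(top - sizeof(void*));
    }

    int main() {
      uintptr_t page = 3 * kPageSize;        // pretend this is a page start
      uintptr_t obj = page + 0x400;          // an object somewhere inside it
      assert(PageStartFromAddress(obj) == page);
      assert(PageStartFromAllocationTop(page + kPageSize) == page);
      return 0;
    }

The same alignment mask underlies the checks at source lines 98 and 1753-1769.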
852 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
857 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
911 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
915 Page* page = Page::FromAddress(addr);
928 static const int kSize = Page::kPageSize / kRegionSize;
930 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
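
The expressions at source lines 911-930 map an address to a region index within its page: take the offset of the address inside the page and shift it by the region-size log, with the page size required to be an exact multiple of the region size. A sketch of that arithmetic with assumed sizes (1 MB page, 8 KB regions) standing in for the real constants:

    #include <cassert>
    #include <cstdint>

    // Assumed figures: 1 MB pages carved into 8 KB regions. The real constants
    // are Page::kPageSize, Page::kPageAlignmentMask and kRegionSizeLog2.
    const uintptr_t kPageSize = uintptr_t(1) << 20;
    const uintptr_t kPageAlignmentMask = kPageSize - 1;
    const uintptr_t kRegionSizeLog2 = 13;
    const uintptr_t kRegionSize = uintptr_t(1) << kRegionSizeLog2;
    const uintptr_t kRegionsPerPage = kPageSize / kRegionSize;  // cf. source line 928

    // Mirrors the expression at source line 911: take the offset of the
    // address inside its page, then divide by the region size.
    uintptr_t RegionIndex(uintptr_t addr) {
      return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
    }

    int main() {
      assert(kPageSize % kRegionSize == 0);               // cf. source line 930
      uintptr_t page = 7 * kPageSize;                     // pretend page start
      assert(RegionIndex(page) == 0);                     // first region
      assert(RegionIndex(page + kRegionSize) == 1);       // second region
      assert(RegionIndex(page + kPageSize - 1) == kRegionsPerPage - 1);
      return 0;
    }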
953 Page* AllocatePage(PagedSpace* owner, Executability executable);
978 return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
1079 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1118 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1168 inline Page* next();
1172 Page* prev_page_; // Previous page returned.
1175 Page* next_page_;
1181 // Page::next_page() call.
1195 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
1392 void CountFreeListItems(Page* p, SizeStats* sizes);
1394 intptr_t EvictFreeListItems(Page* p);
1399 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
1526 Page::FromAddress(top) == Page::FromAddress(limit - 1));
1540 void ReleasePage(Page* page);
1546 Page* anchor() { return &anchor_; }
1573 static bool ShouldBeSweptLazily(Page* p) {
1575 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1579 void SetPagesToSweep(Page* first) {
1589 void IncreaseUnsweptFreeBytes(Page* p) {
1594 void DecreaseUnsweptFreeBytes(Page* p) {
1605 Page* FirstPage() { return anchor_.next_page(); }
1606 Page* LastPage() { return anchor_.prev_page(); }
1608 void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
1634 Page anchor_;
1652 Page* first_unswept_page_;
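
Source lines 1546, 1605-1606 and 1634 suggest that a space threads its pages on a circular, doubly linked list through a dummy anchor page, so FirstPage() and LastPage() are simply the anchor's next and prev pointers. A minimal sketch of that sentinel pattern under that assumption, with illustrative names rather than V8's:

    #include <cassert>

    // Illustrative names, not V8's. The anchor node stands in for the
    // anchor_ member at source line 1634.
    struct PageNode {
      PageNode* next;
      PageNode* prev;
      PageNode() : next(this), prev(this) {}  // an empty list points at itself
    };

    // Link 'page' into the circular list immediately after 'pos'.
    void InsertAfter(PageNode* pos, PageNode* page) {
      page->next = pos->next;
      page->prev = pos;
      pos->next->prev = page;
      pos->next = page;
    }

    int main() {
      PageNode anchor;                 // the sentinel owned by the space
      PageNode a, b;
      InsertAfter(&anchor, &a);        // list: anchor <-> a
      InsertAfter(&a, &b);             // list: anchor <-> a <-> b
      assert(anchor.next == &a);       // FirstPage(), cf. source line 1605
      assert(anchor.prev == &b);       // LastPage(),  cf. source line 1606
      return 0;
    }

A sentinel keeps the list non-empty by construction, so linking and unlinking pages needs no null checks.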
1728 static const int kAreaSize = Page::kNonCodeObjectAreaSize;
1753 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
1758 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
1769 ~Page::kPageAlignmentMask);
2113 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2329 virtual Address PageAllocationLimit(Page* page) {
2359 page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
2363 virtual Address PageAllocationLimit(Page* page) {
2415 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
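
For spaces that hold fixed-size objects (source lines 2359-2415), two numbers fall out of plain integer division: kMapsPerPage is the usable area divided by the object size, and page_extra_ is the remainder, the sliver at the end of a page too small for one more object. A worked example with assumed figures standing in for Page::kNonCodeObjectAreaSize and Map::kSize:

    #include <cassert>
    #include <cstdint>

    // Assumed figures only: roughly 1 MB of usable non-code area per page and
    // an 88-byte Map, in place of Page::kNonCodeObjectAreaSize and Map::kSize.
    const intptr_t kNonCodeObjectAreaSize = (intptr_t(1) << 20) - 256;
    const intptr_t kMapSize = 88;

    int main() {
      // cf. source line 2415: how many fixed-size objects fit on one page.
      intptr_t maps_per_page = kNonCodeObjectAreaSize / kMapSize;
      // cf. source line 2359: the leftover tail too small for another object.
      intptr_t page_extra = kNonCodeObjectAreaSize % kMapSize;
      // Together they account for the whole usable area of the page.
      assert(maps_per_page * kMapSize + page_extra == kNonCodeObjectAreaSize);
      assert(page_extra < kMapSize);
      return 0;
    }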
2458 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2460 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2461 // A large object always starts at Page::kObjectStartOffset into a page.
2476 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2477 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
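
Per the comment at source lines 2458-2461, each large object is allocated with kPageSize + kObjectStartOffset bytes of extra padding, and source lines 2476-2477 invert that: given a chunk size, the usable object size is whatever remains after the overhead, or zero if the chunk is not even that big. A standalone sketch with assumed constants in place of Page::kPageSize and Page::kObjectStartOffset:

    #include <cassert>
    #include <cstdint>

    // Assumed constants; the real ones are Page::kPageSize and
    // Page::kObjectStartOffset.
    const intptr_t kPageSize = intptr_t(1) << 20;
    const intptr_t kObjectStartOffset = 256;

    // Mirrors source lines 2476-2477: a chunk no bigger than one page plus the
    // object start offset cannot hold a large object at all; otherwise the
    // usable object size is the chunk minus that overhead.
    intptr_t ObjectSizeFor(intptr_t chunk_size) {
      if (chunk_size <= kPageSize + kObjectStartOffset) return 0;
      return chunk_size - kPageSize - kObjectStartOffset;
    }

    int main() {
      assert(ObjectSizeFor(kPageSize) == 0);   // too small to hold anything
      assert(ObjectSizeFor(2 * kPageSize) == kPageSize - kObjectStartOffset);
      return 0;
    }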