Lines Matching refs:page

36  * are encoded in its (first)page->mapping
43 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
46 static int is_first_page(struct page *page)
48 return test_bit(PG_private, &page->flags);
51 static int is_last_page(struct page *page)
53 return test_bit(PG_private_2, &page->flags);
56 static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
60 BUG_ON(!is_first_page(page));
62 m = (unsigned long)page->mapping;
67 static void set_zspage_mapping(struct page *page, unsigned int class_idx,
71 BUG_ON(!is_first_page(page));
75 page->mapping = (struct address_space *)m;
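
Only the load and store of (first)page->mapping are matched above. A minimal sketch of the full pack/unpack pair, assuming the class index sits above a FULLNESS_BITS-wide fullness field (the FULLNESS_BITS/FULLNESS_MASK/CLASS_IDX_MASK names are assumptions, not taken from the matched lines):

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
                                enum fullness_group *fullness)
{
        unsigned long m;
        BUG_ON(!is_first_page(page));

        m = (unsigned long)page->mapping;
        /* low bits: fullness group; bits above them: size class index */
        *fullness = m & FULLNESS_MASK;
        *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
                                enum fullness_group fullness)
{
        unsigned long m;
        BUG_ON(!is_first_page(page));

        m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
                        (fullness & FULLNESS_MASK);
        page->mapping = (struct address_space *)m;
}
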
89 static enum fullness_group get_fullness_group(struct page *page)
93 BUG_ON(!is_first_page(page));
95 inuse = page->inuse;
96 max_objects = page->objects;
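
The classification branches contain no reference to `page` and so are not matched. A sketch of the whole function, assuming the usual four-way split; the exact almost-empty threshold (fullness_threshold_frac) is an assumption:

static enum fullness_group get_fullness_group(struct page *page)
{
        int inuse, max_objects;
        enum fullness_group fg;
        BUG_ON(!is_first_page(page));

        inuse = page->inuse;
        max_objects = page->objects;

        if (inuse == 0)
                fg = ZS_EMPTY;
        else if (inuse == max_objects)
                fg = ZS_FULL;
        else if (inuse <= max_objects / fullness_threshold_frac)
                fg = ZS_ALMOST_EMPTY;   /* threshold form is an assumption */
        else
                fg = ZS_ALMOST_FULL;

        return fg;
}
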
110 static void insert_zspage(struct page *page, struct size_class *class,
113 struct page **head;
115 BUG_ON(!is_first_page(page));
122 list_add_tail(&page->lru, &(*head)->lru);
124 *head = page;
127 static void remove_zspage(struct page *page, struct size_class *class,
130 struct page **head;
132 BUG_ON(!is_first_page(page));
141 else if (*head == page)
142 *head = (struct page *)list_entry((*head)->lru.next,
143 struct page, lru);
145 list_del_init(&page->lru);
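
A sketch of both list helpers built around the matched lines: the head of each fullness list is cached in class->fullness_list[], so insert keeps the cached head current and remove promotes the next entry when the head leaves (the _ZS_NR_FULLNESS_GROUPS guard is an assumption):

static void insert_zspage(struct page *page, struct size_class *class,
                                enum fullness_group fullness)
{
        struct page **head;

        BUG_ON(!is_first_page(page));

        /* empty/full zspages are not kept on any list */
        if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                return;

        head = &class->fullness_list[fullness];
        if (*head)
                list_add_tail(&page->lru, &(*head)->lru);

        *head = page;
}

static void remove_zspage(struct page *page, struct size_class *class,
                                enum fullness_group fullness)
{
        struct page **head;

        BUG_ON(!is_first_page(page));

        if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                return;

        head = &class->fullness_list[fullness];
        BUG_ON(!*head);
        if (list_empty(&(*head)->lru))
                *head = NULL;           /* page was the only entry */
        else if (*head == page)
                *head = (struct page *)list_entry((*head)->lru.next,
                                        struct page, lru);

        list_del_init(&page->lru);
}
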
149 struct page *page)
155 BUG_ON(!is_first_page(page));
157 get_zspage_mapping(page, &class_idx, &currfg);
158 newfg = get_fullness_group(page);
163 remove_zspage(page, class, currfg);
164 insert_zspage(page, class, newfg);
165 set_zspage_mapping(page, class_idx, newfg);
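
A sketch of the enclosing function: re-derive the group from the current inuse count and relink the zspage only when the group actually changed (the early return on an unchanged group is an assumption):

static enum fullness_group fix_fullness_group(struct zs_pool *pool,
                                                struct page *page)
{
        unsigned int class_idx;
        struct size_class *class;
        enum fullness_group currfg, newfg;

        BUG_ON(!is_first_page(page));

        get_zspage_mapping(page, &class_idx, &currfg);
        newfg = get_fullness_group(page);
        if (newfg == currfg)
                goto out;

        class = &pool->size_class[class_idx];
        remove_zspage(page, class, currfg);
        insert_zspage(page, class, newfg);
        set_zspage_mapping(page, class_idx, newfg);

out:
        return newfg;
}
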
208 * linked together using fields in struct page. This function finds
209 * the first/head page, given any component page of a zspage.
211 static struct page *get_first_page(struct page *page)
213 if (is_first_page(page))
214 return page;
216 return page->first_page;
219 static struct page *get_next_page(struct page *page)
221 struct page *next;
223 if (is_last_page(page))
225 else if (is_first_page(page))
226 next = (struct page *)page->private;
228 next = list_entry(page->lru.next, struct page, lru);
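
The `next = NULL` branch has no reference to `page` and is therefore missing above; the complete traversal plausibly reads:

static struct page *get_next_page(struct page *page)
{
        struct page *next;

        if (is_last_page(page))
                next = NULL;            /* end of the page chain */
        else if (is_first_page(page))
                next = (struct page *)page->private;
        else
                next = list_entry(page->lru.next, struct page, lru);

        return next;
}
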
233 /* Encode <page, obj_idx> as a single handle value */
234 static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
238 if (!page) {
243 handle = page_to_pfn(page) << OBJ_INDEX_BITS;
249 /* Decode <page, obj_idx> pair from the given object handle */
250 static void obj_handle_to_location(void *handle, struct page **page,
255 *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
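
A sketch of the encode/decode pair around the matched lines: the PFN fills the high bits of the handle and the per-page object index the low OBJ_INDEX_BITS (OBJ_INDEX_MASK is assumed to be the matching low-bit mask):

static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
        unsigned long handle;

        if (!page) {
                /* a NULL page encodes the end of a freelist */
                BUG_ON(obj_idx);
                return NULL;
        }

        handle = page_to_pfn(page) << OBJ_INDEX_BITS;
        handle |= (obj_idx & OBJ_INDEX_MASK);

        return (void *)handle;
}

static void obj_handle_to_location(void *handle, struct page **page,
                                unsigned long *obj_idx)
{
        unsigned long hval = (unsigned long)handle;

        *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
        *obj_idx = hval & OBJ_INDEX_MASK;
}
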
259 static unsigned long obj_idx_to_offset(struct page *page,
264 if (!is_first_page(page))
265 off = page->index;
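
A sketch of the offset computation: objects are laid out back to back at class_size granularity, and page->index records where the first object of a non-head page begins (a head page always starts at offset 0):

static unsigned long obj_idx_to_offset(struct page *page,
                                unsigned long obj_idx, int class_size)
{
        unsigned long off = 0;

        if (!is_first_page(page))
                off = page->index;      /* first object's offset in this page */

        return off + obj_idx * class_size;
}
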
270 static void reset_page(struct page *page)
272 clear_bit(PG_private, &page->flags);
273 clear_bit(PG_private_2, &page->flags);
274 set_page_private(page, 0);
275 page->mapping = NULL;
276 page->freelist = NULL;
277 reset_page_mapcount(page);
280 static void free_zspage(struct page *first_page)
282 struct page *nextp, *tmp, *head_extra;
287 head_extra = (struct page *)page_private(first_page);
292 /* zspage with only 1 system page */
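
A sketch of the teardown around the matched lines, assuming sub-pages beyond the second hang off head_extra->lru; the list walk and the final frees are reconstructions:

static void free_zspage(struct page *first_page)
{
        struct page *nextp, *tmp, *head_extra;

        BUG_ON(!is_first_page(first_page));
        BUG_ON(first_page->inuse);

        /* first_page->private points at the second page, if any */
        head_extra = (struct page *)page_private(first_page);

        reset_page(first_page);
        __free_page(first_page);

        /* zspage with only 1 system page */
        if (!head_extra)
                return;

        list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
                list_del(&nextp->lru);
                reset_page(nextp);
                __free_page(nextp);
        }
        reset_page(head_extra);
        __free_page(head_extra);
}
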
306 static void init_zspage(struct page *first_page, struct size_class *class)
309 struct page *page = first_page;
312 while (page) {
313 struct page *next_page;
318 * page->index stores offset of first object starting
319 * in the page. For the first page, this is always 0,
323 if (page != first_page)
324 page->index = off;
326 link = (struct link_free *)kmap_atomic(page) +
333 link->next = obj_location_to_handle(page, i);
340 * page, which must point to the first object on the next
341 * page (if present)
343 next_page = get_next_page(page);
346 page = next_page;
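
A sketch of the freelist construction these lines come from: walk the page chain, stamp each non-head page's starting offset, and thread a link_free record through every object slot, ending each page at object 0 of the next page. Only the matched lines are verbatim; the loop bounds and tail handling are assumptions:

static void init_zspage(struct page *first_page, struct size_class *class)
{
        unsigned long off = 0;
        struct page *page = first_page;

        BUG_ON(!is_first_page(first_page));
        while (page) {
                struct page *next_page;
                struct link_free *link;
                unsigned long i = 1;

                /* non-head pages record where their first object starts */
                if (page != first_page)
                        page->index = off;

                /*
                 * class->size is a multiple of sizeof(*link), so this
                 * pointer arithmetic lands exactly on object boundaries
                 */
                link = (struct link_free *)kmap_atomic(page) +
                                                off / sizeof(*link);

                /* chain every object whose successor also starts here */
                while (off + class->size < PAGE_SIZE) {
                        link->next = obj_location_to_handle(page, i++);
                        link += class->size / sizeof(*link);
                        off += class->size;
                }

                /*
                 * the last object starting in this page points at object 0
                 * of the next page; a NULL next page yields the NULL handle
                 * that terminates the freelist
                 */
                next_page = get_next_page(page);
                link->next = obj_location_to_handle(next_page, 0);
                kunmap_atomic(link);

                page = next_page;
                off = (off + class->size) % PAGE_SIZE;
        }
}
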
354 static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
357 struct page *first_page = NULL;
361 * 1. first page->private = first sub-page
362 * 2. all sub-pages are linked together using page->lru
363 * 3. each sub-page is linked to the first page using page->first_page
366 * page->lru. Also, we set PG_private to identify the first page
367 * (i.e. no other sub-page has this flag set) and PG_private_2 to
368 * identify the last page.
372 struct page *page, *prev_page;
374 page = alloc_page(flags);
375 if (!page)
378 INIT_LIST_HEAD(&page->lru);
379 if (i == 0) { /* first page */
380 set_bit(PG_private, &page->flags);
381 set_page_private(page, 0);
382 first_page = page;
386 first_page->private = (unsigned long)page;
388 page->first_page = first_page;
390 list_add(&page->lru, &prev_page->lru);
391 if (i == class->zspage_order - 1) /* last page */
392 set_bit(PG_private_2, &page->flags);
394 prev_page = page;
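
A sketch of the whole allocation loop. One quirk is visible in the matched lines: prev_page is declared inside the loop body (line 372) yet read on a later iteration; it is hoisted to function scope here so the sketch is unambiguously valid C. The error/cleanup path is an assumption:

static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
        int i, error;
        struct page *first_page = NULL, *prev_page = NULL;

        error = -ENOMEM;
        for (i = 0; i < class->zspage_order; i++) {
                struct page *page;

                page = alloc_page(flags);
                if (!page)
                        goto cleanup;

                INIT_LIST_HEAD(&page->lru);
                if (i == 0) {                   /* first page */
                        set_bit(PG_private, &page->flags);
                        set_page_private(page, 0);
                        first_page = page;
                        first_page->inuse = 0;
                }
                if (i == 1)     /* head caches the second page in ->private */
                        first_page->private = (unsigned long)page;
                if (i >= 1)
                        page->first_page = first_page;
                if (i >= 2)
                        list_add(&page->lru, &prev_page->lru);
                if (i == class->zspage_order - 1)       /* last page */
                        set_bit(PG_private_2, &page->flags);
                prev_page = page;
        }

        init_zspage(first_page, class);

        first_page->freelist = obj_location_to_handle(first_page, 0);
        /* maximum number of objects this zspage can hold */
        first_page->objects = class->zspage_order * PAGE_SIZE / class->size;

        error = 0;
cleanup:
        if (unlikely(error) && first_page) {
                free_zspage(first_page);
                first_page = NULL;
        }
        return first_page;
}
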
414 static struct page *find_get_zspage(struct size_class *class)
417 struct page *page;
420 page = class->fullness_list[i];
421 if (page)
425 return page;
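
A sketch of the lookup: scan the fullness groups in order of preference and return the first cached head, or NULL when every list is empty (loop bound and initializer are assumptions):

static struct page *find_get_zspage(struct size_class *class)
{
        int i;
        struct page *page = NULL;

        for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
                page = class->fullness_list[i];
                if (page)
                        break;
        }

        return page;
}
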
570  * @pool: pool to allocate from
571  * @size: size of block to allocate
573  * On success, a handle encoding <page, obj_idx> of the allocated
574  * object is returned. On failure, NULL is returned.
586 struct page *first_page, *m_page;
632 struct page *first_page, *f_page;
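
The fragments above are from zs_malloc() and zs_free(). For context, a caller-side sketch of this handle-based API as a zram-style user would drive it (the helper itself is hypothetical, not from the file); the region between map and unmap is atomic because the mapping area is per-cpu, so the caller must not sleep there:

/* hypothetical caller: store one buffer into the pool and free it */
static int zs_store_example(struct zs_pool *pool, const void *src, size_t len)
{
        void *handle, *vaddr;

        handle = zs_malloc(pool, len);          /* opaque <PFN, obj_idx> handle */
        if (!handle)
                return -ENOMEM;

        vaddr = zs_map_object(pool, handle);    /* linear view, may span 2 pages */
        memcpy(vaddr, src, len);
        zs_unmap_object(pool, handle);          /* no sleeping before this */

        zs_free(pool, handle);
        return 0;
}
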
673 struct page *page;
683 obj_handle_to_location(handle, &page, &obj_idx);
684 get_zspage_mapping(get_first_page(page), &class_idx, &fg);
686 off = obj_idx_to_offset(page, obj_idx, class->size);
690 /* this object is contained entirely within a page */
691 area->vm_addr = kmap_atomic(page);
694 struct page *nextp;
696 nextp = get_next_page(page);
700 set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
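
A sketch of the mapping path these lines come from: resolve the handle to <page, offset>, then either kmap the single page or stitch the two neighbouring pages into the pre-allocated per-cpu VM area with a pair of PTEs. The zs_map_area variable name, the BUG_ON and the return arithmetic are assumptions:

void *zs_map_object(struct zs_pool *pool, void *handle)
{
        struct page *page;
        unsigned long obj_idx, off;
        unsigned int class_idx;
        enum fullness_group fg;
        struct size_class *class;
        struct mapping_area *area;

        BUG_ON(!handle);

        obj_handle_to_location(handle, &page, &obj_idx);
        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
        class = &pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);

        area = &get_cpu_var(zs_map_area);       /* pins this CPU until unmap */
        if (off + class->size <= PAGE_SIZE) {
                /* this object is contained entirely within a page */
                area->vm_addr = kmap_atomic(page);
        } else {
                /* this object spans two pages */
                struct page *nextp;

                nextp = get_next_page(page);
                BUG_ON(!nextp);

                set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
                set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));

                /* the VM area was pre-allocated, so this cannot fail */
                area->vm_addr = area->vm->addr;
        }

        /* off is page-local (< PAGE_SIZE) for any valid handle */
        return area->vm_addr + off;
}
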
713 struct page *page;
723 obj_handle_to_location(handle, &page, &obj_idx);
724 get_zspage_mapping(get_first_page(page), &class_idx, &fg);
726 off = obj_idx_to_offset(page, obj_idx, class->size);
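
A matching sketch of the unmap path, which repeats the lookup above and tears the mapping down. The two-page teardown (clearing the PTEs and flushing the aliased range) is the most architecture-dependent part, so the flush call here is an assumption:

void zs_unmap_object(struct zs_pool *pool, void *handle)
{
        struct page *page;
        unsigned long obj_idx, off;
        unsigned int class_idx;
        enum fullness_group fg;
        struct size_class *class;
        struct mapping_area *area;

        obj_handle_to_location(handle, &page, &obj_idx);
        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
        class = &pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);

        area = &__get_cpu_var(zs_map_area);
        if (off + class->size <= PAGE_SIZE) {
                kunmap_atomic(area->vm_addr);
        } else {
                set_pte(area->vm_ptes[0], __pte(0));
                set_pte(area->vm_ptes[1], __pte(0));
                /* flush the two aliased pages; exact primitive is assumed */
                flush_tlb_kernel_range((unsigned long)area->vm_addr,
                                (unsigned long)area->vm_addr + 2 * PAGE_SIZE);
        }
        put_cpu_var(zs_map_area);       /* releases the pin taken in map */
}
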