mm/ -- occurrences of "index", grouped by source file (the numbers are line numbers within each file)

mm/truncate.c
  27: pgoff_t index, void *entry)
  42: if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
  178: (loff_t)page->index << PAGE_CACHE_SHIFT,
  252: pgoff_t index;  [local]
  281: index = start;
  282: while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
  283: min(end - index, (pgoff_t)PAGEVEC_SIZE),
  288: /* We rely upon deletion not changing page->index */
  289: index ...
  26: clear_exceptional_entry(struct address_space *mapping, pgoff_t index, void *entry)  [argument]
  484: pgoff_t index = start;  [local]
  587: pgoff_t index;  [local]
  757: pgoff_t index;  [local]
  ... (further matches not shown)

mm/cleancache.c
  218: * "Get" data from cleancache associated with the poolid/inode/index
  251: key, page->index, page);
  264: * inode and page index. Page must be locked. Note that a put_page
  291: cleancache_ops->put_page(pool_id, key, page->index, page);
  299: * page's inode and page index so that a subsequent "get" will fail.
  324: key, page->index);
  366: int index;  [local]
  372: index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
  373: old_poolid = shared_fs_poolid_map[index];
  374: shared_fs_poolid_map[index] ...
  ... (further matches not shown)

mm/filemap.c
  116: unsigned long index;  [local]
  123: __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
  145: index = page->index;
  146: offset = index & RADIX_TREE_MAP_MASK;
  149: radix_tree_tag_clear(&mapping->page_tree, index, tag);
  198: /* Leave page->index set: truncation lookup relies upon it */
  334: pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;  [local]
  344: while ((index <= end) &&
  345: (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
  891: page_cache_next_hole(struct address_space *mapping, pgoff_t index, unsigned long max_scan)  [argument]
  932: page_cache_prev_hole(struct address_space *mapping, pgoff_t index, unsigned long max_scan)  [argument]
  1291: find_get_pages_contig(struct address_space *mapping, pgoff_t index, unsigned int nr_pages, struct page **pages)  [argument]
  1367: find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages)  [argument]
  1473: pgoff_t index;  [local]
  2145: __read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *, struct page *), void *data, gfp_t gfp)  [argument]
  2178: do_read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *, struct page *), void *data, gfp_t gfp)  [argument]
  2231: read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *, struct page *), void *data)  [argument]
  2251: read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp)  [argument]
  2436: grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags)  [argument]
  ... (further matches not shown)

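Note on the read_cache_page() family listed above: every variant takes the owning address_space plus a page-cache index and returns the page at that offset, filling it through the supplied filler callback if it is not cached yet (an ERR_PTR() is returned on failure). A minimal sketch of how a caller might use it; my_filler() and my_read_one_page() are hypothetical names, and the filler here simply zero-fills the page the way fs/libfs.c's simple_readpage() does:

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/highmem.h>

	/* Hypothetical synchronous filler: zero-fill, mark uptodate, unlock. */
	static int my_filler(void *data, struct page *page)
	{
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* Return the page at 'index', allocating and filling it if absent. */
	static struct page *my_read_one_page(struct address_space *mapping,
					     pgoff_t index)
	{
		return read_cache_page(mapping, index, my_filler, NULL);
	}
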
mm/filemap_xip.c
  60: pgoff_t index, end_index;  [local]
  68: index = pos >> PAGE_CACHE_SHIFT;
  84: if (index >= end_index) {
  85: if (index > end_index)
  96: error = mapping->a_ops->get_xip_mem(mapping, index, 0,
  134: index += offset >> PAGE_CACHE_SHIFT;
  337: unsigned long index;  [local]
  344: index = pos >> PAGE_CACHE_SHIFT;
  349: status = a_ops->get_xip_mem(mapping, index, 0,
  354: status = a_ops->get_xip_mem(mapping, index, ...
  452: pgoff_t index = from >> PAGE_CACHE_SHIFT;  [local]
  ... (further matches not shown)

mm/shmem.c
  121: struct shmem_inode_info *info, pgoff_t index);
  122: static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
  125: static inline int shmem_getpage(struct inode *inode, pgoff_t index, ...  [argument]
  128: return shmem_getpage_gfp(inode, index, pagep, sgp,
  259: pgoff_t index, void *expected, void *replacement)
  266: pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
  284: pgoff_t index, swp_entry_t swap)
  289: item = radix_tree_lookup(&mapping->page_tree, index);
  299: pgoff_t index, void *expected)
  308: page->index ...
  258: shmem_radix_tree_replace(struct address_space *mapping, pgoff_t index, void *expected, void *replacement)  [argument]
  283: shmem_confirm_swap(struct address_space *mapping, pgoff_t index, swp_entry_t swap)  [argument]
  297: shmem_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t index, void *expected)  [argument]
  351: shmem_free_swap(struct address_space *mapping, pgoff_t index, void *radswap)  [argument]
  372: pgoff_t index = 0;  [local]
  411: pgoff_t index;  [local]
  622: pgoff_t index;  [local]
  755: pgoff_t index;  [local]
  879: shmem_swapin(swp_entry_t swap, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index)  [argument]
  900: shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, pgoff_t index)  [argument]
  927: shmem_swapin(swp_entry_t swap, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index)  [argument]
  933: shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, pgoff_t index)  [argument]
  964: shmem_replace_page(struct page **pagep, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index)  [argument]
  1037: shmem_getpage_gfp(struct inode *inode, pgoff_t index, struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)  [argument]
  1364: pgoff_t index;  [local]
  1484: pgoff_t index = pos >> PAGE_CACHE_SHIFT;  [local]
  1527: pgoff_t index;  [local]
  1639: pgoff_t index, end_index;  [local]
  1746: shmem_seek_hole_data(struct address_space *mapping, pgoff_t index, pgoff_t end, int whence)  [argument]
  2059: pgoff_t start, index, end;  [local]
  3447: shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp)  [argument]
  ... (further matches not shown)

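Note on shmem_read_mapping_page_gfp() at line 3447 above: it is the exported way to pull one page of a tmpfs/shmem file into the page cache by index (GEM drivers such as i915 use it for object backing store), returning the page with a reference held or an ERR_PTR() on failure. A minimal hedged sketch; demo_shmem_page() is an illustrative wrapper, not kernel code:

	#include <linux/shmem_fs.h>
	#include <linux/pagemap.h>

	/* Fetch the page at 'index' of a shmem mapping, allocating (and
	 * zero-filling or swapping in) the page if it is not resident. */
	static struct page *demo_shmem_page(struct address_space *mapping,
					    pgoff_t index)
	{
		return shmem_read_mapping_page_gfp(mapping, index,
						   mapping_gfp_mask(mapping));
	}
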
mm/madvise.c
  144: unsigned long index;  [local]
  149: for (index = start; index != end; index += PAGE_SIZE) {
  156: pte = *(orig_pte + ((index - start) / PAGE_SIZE));
  166: vma, index);
  192: pgoff_t index;  [local]
  197: index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  199: page = find_get_entry(mapping, index);

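Note on madvise.c line 197 above: it is the open-coded form of linear_page_index(). The page-cache index that corresponds to a user address is the address's page offset from the start of the VMA plus the VMA's starting file offset (vm_pgoff). A simplified sketch of the same computation, ignoring the hugetlb special case the real helper handles; demo_linear_page_index() is an illustrative name:

	#include <linux/mm.h>

	static pgoff_t demo_linear_page_index(struct vm_area_struct *vma,
					      unsigned long address)
	{
		pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;

		return pgoff + vma->vm_pgoff;	/* offset within the mapped file */
	}

For anonymous pages, rmap.c (further down in this listing) stores exactly this value in page->index, which is what its VM_BUG_ON/BUG_ON checks verify.
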
mm/readahead.c
  77: * pages have their ->index populated and are otherwise uninitialised.
  93: page->index, GFP_KERNEL)) {
  131: page->index, GFP_KERNEL)) {
  187: page->index = page_offset;
  554: pgoff_t index, unsigned long nr)
  559: return force_page_cache_readahead(mapping, filp, index, nr);
  553: do_readahead(struct address_space *mapping, struct file *filp, pgoff_t index, unsigned long nr)  [argument]

mm/page-writeback.c
  1786: * @start: starting page index
  1787: * @end: ending page index (inclusive)
  1850: pgoff_t index;  [local]
  1860: index = writeback_index;
  1861: if (index == 0)
  1867: index = wbc->range_start >> PAGE_CACHE_SHIFT;
  1879: tag_pages_for_writeback(mapping, index, end);
  1880: done_index = index;
  1881: while (!done && (index <= end)) {
  1884: nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, ta ...
  ... (further matches not shown)

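Note: the page-writeback.c hits above are the heart of write_cache_pages(): derive the starting index from wbc->range_start, pre-tag the range with tag_pages_for_writeback() (for data-integrity writeback), then repeatedly gather batches of dirty-tagged pages with pagevec_lookup_tag(), which advances the index past each batch. A condensed, hedged sketch of that walk; demo_walk_dirty_pages() and the per-page handling are illustrative, not the kernel's code:

	#include <linux/pagevec.h>
	#include <linux/pagemap.h>
	#include <linux/sched.h>

	static void demo_walk_dirty_pages(struct address_space *mapping,
					  pgoff_t index, pgoff_t end)
	{
		struct pagevec pvec;
		unsigned int nr_pages, i;

		pagevec_init(&pvec, 0);
		while (index <= end &&
		       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						      PAGECACHE_TAG_DIRTY,
						      PAGEVEC_SIZE))) {
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pvec.pages[i];

				if (page->index > end)	/* batch may overshoot 'end' */
					break;
				/* lock_page(), recheck page->mapping, then write it out */
			}
			pagevec_release(&pvec);
			cond_resched();
		}
	}
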
mm/vmstat.c
  635: * A fragmentation index only makes sense if an allocation of a requested
  636: * size would fail. If that is true, the fragmentation index indicates
  648: /* Fragmentation index only makes sense when a request would fail */
  661: /* Same as __fragmentation index but allocs contig_page_info on stack */
  1441: * Return an index indicating how much of the available free memory is
  1466: int index;  [local]
  1474: index = unusable_free_index(order, &info);
  1475: seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
  1482: * Display unusable free space index
  1526: int index;  [local]
  ... (further matches not shown)

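Note on line 1475 above: these indices are kept as integers scaled by 1000, and seq_printf(m, "%d.%03d ", index / 1000, index % 1000) turns one back into a three-decimal fraction for the seq_file output; an index value of 978, for example, prints as 0.978.
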
mm/slab_common.c
  611: * Conversion table for small slabs sizes / 8 to the index in the
  654: int index;  [local]
  665: index = size_index[size_index_elem(size)];
  667: index = fls(size - 1);
  671: return kmalloc_dma_caches[index];
  674: return kmalloc_caches[index];
  692: * handle the index determination for the smaller caches.

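Note on lines 665/667 above: this is how kmalloc() picks its cache. Small requests are mapped through the size_index[] table; anything larger uses fls(size - 1), the position of the highest set bit of size - 1. For example, kmalloc(1000) gives fls(999) == 10, so the request is served from kmalloc_caches[10], the 1024-byte cache (or from kmalloc_dma_caches[10] when the caller asked for DMA-able memory).
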
mm/zsmalloc.c
  49: * page->index (union with page->freelist): offset of the first object
  67: * page->mapping: class index and fullness group of the zspage
  116: * Note that object index <obj_idx> is relative to system
  195: unsigned int index;  [member in struct size_class]
  224: * A zspage's class index and fullness group
  358: * classes depending on its size. This function returns index of the
  581: off = page->index;
  634: * page->index stores offset of first object starting
  636: * so we use first_page->index (aka ->freelist) to store
  640: page->index ...
  ... (further matches not shown)

mm/rmap.c
  903: VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
  936: page->index = linear_page_index(vma, address);
  950: * The page's anon-rmap details (mapping and index) are guaranteed to
  962: BUG_ON(page->index != linear_page_index(vma, address));
  973: * the anon_vma case: to serialize mapping,index checking after setting,
  1281: * Consequently, given a particular page and its ->index, we cannot locate the
  1386: if (page->index != linear_page_index(vma, address)) {
  1387: pte_t ptfile = pgoff_to_pte(page->index);
  1750: page->index = linear_page_index(vma, address);

mm/debug.c
  86: pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
  88: page->mapping, page->index);

mm/migrate.c
  199: pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  763: newpage->index = page->index;
  1812: new_page->index = page->index;

mm/swapfile.c
  429: cluster_set_null(&percpu_cluster->index);
  446: if (cluster_is_null(&cluster->index)) {
  448: cluster->index = si->free_cluster_head;
  449: cluster->next = cluster_next(&cluster->index) *
  470: while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
  479: cluster_set_null(&cluster->index);
  1032: * corresponding to given index in swap_info (swap type).
  2457: cluster_set_null(&cluster->index);

mm/memory.c
  653: pgoff_t index;  [local]
  679: index = linear_page_index(vma, addr);
  688: "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
  689: (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
  1112: * Each page->index must be checked when
  1116: (page->index < details->first_index ||
  1117: page->index > details->last_index))
  1127: addr) != page->index) {
  1128: pte_t ptfile = pgoff_to_pte(page->index);
  2003: vmf.pgoff = page->index;
  ... (further matches not shown)

mm/swap.c
  1052: * @start: The starting entry index
  1103: * @start: The starting page index
  1124: pgoff_t *index, int tag, unsigned nr_pages)
  1126: pvec->nr = find_get_pages_tag(mapping, index, tag,
  1123: pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, int tag, unsigned nr_pages)  [argument]

mm/huge_memory.c
  1736: page_tail->index = page->index + i;
  1857: pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

mm/percpu.c
  43: * Chunks can be determined from the address using the index field
  44: * in the page struct. The index field contains a pointer to the chunk.
  229: page->index = (unsigned long)pcpu;
  235: return (struct pcpu_chunk *)page->index;
  267: * be integer variables and will be set to start and end page index of
  323: * @i: index of the area in question

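Note on lines 43-44, 229 and 235 above: percpu reuses page->index as a back-pointer, so that for pages backing a chunk's per-cpu area the allocator can go from a page straight to its owning struct pcpu_chunk. A hedged sketch mirroring those two lines; the demo_* names are illustrative, the real helpers live in mm/percpu.c:

	#include <linux/mm_types.h>

	struct pcpu_chunk;	/* opaque here; defined in mm/percpu.c */

	static void demo_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
	{
		page->index = (unsigned long)pcpu;	/* stash the owning chunk */
	}

	static struct pcpu_chunk *demo_get_page_chunk(struct page *page)
	{
		return (struct pcpu_chunk *)page->index;
	}
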
mm/ksm.c
  485: * This helper is used for getting right index into array of tree roots.
  487: * stable and unstable pages from all nodes with roots in index 0. Otherwise,
  1875: page->index == linear_page_index(vma, address)) {

mm/hugetlb.c
  969: pgoff_t index = page_index(page_head);  [local]
  980: return (index << compound_order(page_head)) + compound_idx;
  1959: * index of a node device or _hstate == node id.

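Note on line 980 above: it linearises a position inside a huge page by shifting the head page's index up by the compound order and adding the sub-page's offset within the huge page. For a 2 MB huge page (order 9, i.e. 512 base pages) whose head sits at index 3, the sub-page with compound_idx == 5 ends up at index (3 << 9) + 5 == 1541.
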
mm/slab.c
  1366: static void __init set_up_node(struct kmem_cache *cachep, int index)  [argument]
  1371: cachep->node[node] = &init_kmem_cache_node[index + node];
  2204: * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.