Searched defs:mapping (Results 1 - 25 of 27) sorted by relevance

/mm/
fadvise.c
31 struct address_space *mapping; local
47 mapping = f.file->f_mapping;
48 if (!mapping || len < 0) {
53 if (mapping->a_ops->get_xip_mem) {
76 bdi = mapping->backing_dev_info;
110 force_page_cache_readahead(mapping, f.file, start_index,
116 if (!bdi_write_congested(mapping->backing_dev_info))
117 __filemap_fdatawrite_range(mapping, offset, endbyte,
125 unsigned long count = invalidate_mapping_pages(mapping,
136 invalidate_mapping_pages(mapping, start_inde
[all...]
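
Read together, the fadvise.c hits trace the fadvise syscall: the mapping comes from f.file->f_mapping (line 47), POSIX_FADV_WILLNEED turns into readahead (line 110), and POSIX_FADV_DONTNEED into writeback plus invalidation (lines 116-136). A condensed sketch of that dispatch, using only the calls printed above (kernel context assumed; computation of offset/endbyte and the index bounds elided):

    struct address_space *mapping = f.file->f_mapping;

    switch (advice) {
    case POSIX_FADV_WILLNEED:
            /* Populate the page cache ahead of use. */
            force_page_cache_readahead(mapping, f.file,
                                       start_index, nrpages);
            break;
    case POSIX_FADV_DONTNEED:
            /* Kick off writeback unless the device is congested... */
            if (!bdi_write_congested(mapping->backing_dev_info))
                    __filemap_fdatawrite_range(mapping, offset,
                                               endbyte, WB_SYNC_NONE);
            /* ...then drop the clean cached pages in the range. */
            invalidate_mapping_pages(mapping, start_index, end_index);
            break;
    }
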
fremap.c
70 * previously existing mapping.
146 struct address_space *mapping; local
206 /* Don't need a nonlinear mapping, exit success */
219 mapping = vma->vm_file->f_mapping;
225 if (mapping_cap_account_dirty(mapping)) {
241 mutex_lock(&mapping->i_mmap_mutex);
242 flush_dcache_mmap_lock(mapping);
244 vma_interval_tree_remove(vma, &mapping->i_mmap);
245 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
246 flush_dcache_mmap_unlock(mapping);
[all...]
mincore.c
62 static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) argument
68 * When tmpfs swaps out a page from a file, any process mapping that
70 * any other file mapping (ie. marked !present and faulted in with
74 if (shmem_mapping(mapping)) {
75 page = find_get_entry(mapping, pgoff);
85 page = find_get_page(mapping, pgoff);
87 page = find_get_page(mapping, pgoff);
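
The mincore_page() hits show the canonical residency probe: tmpfs mappings can hold swap entries in the radix tree, so they are queried with find_get_entry() (line 75), everything else with find_get_page() (lines 85-87). A simplified sketch of the function whose signature is printed at line 62 (the real code additionally chases shmem swap entries into the swap cache):

    static unsigned char mincore_page(struct address_space *mapping,
                                      pgoff_t pgoff)
    {
            unsigned char present = 0;
            struct page *page;

            if (shmem_mapping(mapping))
                    /* May return a swap entry, not a page. */
                    page = find_get_entry(mapping, pgoff);
            else
                    page = find_get_page(mapping, pgoff);

            if (page && !radix_tree_exceptional_entry(page)) {
                    present = PageUptodate(page);
                    page_cache_release(page);
            }
            return present;
    }
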
page_io.c
140 struct address_space *mapping = swap_file->f_mapping; local
141 struct inode *inode = mapping->host;
266 struct address_space *mapping = swap_file->f_mapping; local
286 ret = mapping->a_ops->direct_IO(ITER_BVEC | WRITE,
352 struct address_space *mapping = swap_file->f_mapping; local
354 ret = mapping->a_ops->readpage(swap_file, page);
384 struct address_space *mapping = sis->swap_file->f_mapping; local
385 return mapping->a_ops->set_page_dirty(page);
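
All four page_io.c hits belong to the swap-over-filesystem path: when the swap area is a file, swap I/O is routed through the backing filesystem's address_space operations (->direct_IO for writes at line 286, ->readpage at line 354, ->set_page_dirty at line 385). A minimal sketch of the read side, wrapped in a hypothetical helper name:

    /* Sketch only: read a swapped-out page via the backing file. */
    static int swap_readpage_over_fs(struct swap_info_struct *sis,
                                     struct page *page)
    {
            struct file *swap_file = sis->swap_file;
            struct address_space *mapping = swap_file->f_mapping;

            /* The filesystem's ->readpage() performs the actual I/O. */
            return mapping->a_ops->readpage(swap_file, page);
    }
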
workingset.c
207 * @mapping: address space the page was backing
210 * Returns a shadow entry to be stored in @mapping->page_tree in place
213 void *workingset_eviction(struct address_space *mapping, struct page *page) argument
276 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
308 struct address_space *mapping; local
315 * the shadow node LRU under the mapping->tree_lock and the
320 * We can then safely transition to the mapping->tree_lock to
326 mapping = node->private_data;
329 if (!spin_trylock(&mapping->tree_lock)) {
353 BUG_ON(!mapping
[all...]
cleancache.c
241 fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
246 if (cleancache_get_key(page->mapping->host, &key) < 0)
283 fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
290 cleancache_get_key(page->mapping->host, &key) >= 0) {
305 void __cleancache_invalidate_page(struct address_space *mapping, argument
308 /* careful... page->mapping is NULL sometimes when this is called */
310 int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
322 if (cleancache_get_key(mapping->host, &key) >= 0) {
340 void __cleancache_invalidate_inode(struct address_space *mapping) argument
343 int fake_pool_id = mapping
[all...]
filemap_xip.c
46 * the mapping->a_ops->get_xip_mem() function for the actual low-level
52 do_xip_mapping_read(struct address_space *mapping, argument
59 struct inode *inode = mapping->host;
65 BUG_ON(!mapping->a_ops->get_xip_mem);
96 error = mapping->a_ops->get_xip_mem(mapping, index, 0,
110 if (mapping_writably_mapped(mapping))
165 __xip_unmap (struct address_space * mapping, argument
185 mutex_lock(&mapping->i_mmap_mutex);
186 vma_interval_tree_foreach(vma, &mapping
225 struct address_space *mapping = file->f_mapping; local
327 struct address_space * mapping = filp->f_mapping; local
401 struct address_space *mapping = filp->f_mapping; local
450 xip_truncate_page(struct address_space *mapping, loff_t from) argument
[all...]
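
The filemap_xip.c hits all revolve around one hook: mapping->a_ops->get_xip_mem() (asserted at line 65, called at line 96), which hands back a direct kernel address and pfn for a file page so reads bypass the page cache entirely. A hedged sketch of the read step, assuming this era's signature get_xip_mem(mapping, index, create, &kmem, &pfn):

    void *xip_mem;
    unsigned long xip_pfn, left;
    int error;

    BUG_ON(!mapping->a_ops->get_xip_mem);
    error = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                        &xip_mem, &xip_pfn);
    if (!error) {
            /* Copy straight out of the backing store's memory;
             * no page cache page is ever allocated. */
            left = copy_to_user(buf, xip_mem + offset, nr);
    }
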
madvise.c
190 struct address_space *mapping)
199 page = find_get_entry(mapping, index);
188 force_shm_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) argument
mremap.c
95 struct address_space *mapping = NULL; local
121 mapping = vma->vm_file->f_mapping;
122 mutex_lock(&mapping->i_mmap_mutex);
158 if (mapping)
159 mutex_unlock(&mapping->i_mmap_mutex);
352 /* Need to be careful about a growing mapping */
470 * Expand (or shrink) an existing mapping, potentially moving it at the
539 /* can we just expand the current mapping? */
readahead.c
28 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) argument
30 ra->ra_pages = mapping->backing_dev_info->ra_pages;
44 static void read_cache_pages_invalidate_page(struct address_space *mapping, argument
50 page->mapping = mapping;
52 page->mapping = NULL;
61 static void read_cache_pages_invalidate_pages(struct address_space *mapping, argument
69 read_cache_pages_invalidate_page(mapping, victim);
75 * @mapping: the address_space
83 int read_cache_pages(struct address_space *mapping, struc argument
111 read_pages(struct address_space *mapping, struct file *filp, struct list_head *pages, unsigned nr_pages) argument
152 __do_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read, unsigned long lookahead_size) argument
210 force_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read) argument
328 count_history_pages(struct address_space *mapping, pgoff_t offset, unsigned long max) argument
343 try_context_readahead(struct address_space *mapping, struct file_ra_state *ra, pgoff_t offset, unsigned long req_size, unsigned long max) argument
378 ondemand_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, bool hit_readahead_marker, pgoff_t offset, unsigned long req_size) argument
489 page_cache_sync_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, pgoff_t offset, unsigned long req_size) argument
524 page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, struct page *page, pgoff_t offset, unsigned long req_size) argument
553 do_readahead(struct address_space *mapping, struct file *filp, pgoff_t index, unsigned long nr) argument
571 struct address_space *mapping = f.file->f_mapping; local
[all...]
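
readahead.c is the densest entry, and the printed signatures give the whole call chain: page_cache_sync_readahead() and page_cache_async_readahead() (lines 489/524) funnel into ondemand_readahead() (line 378) and finally __do_page_cache_readahead() (line 152). From the caller's side the pattern is the one generic file reads use; a sketch, assuming index and last_index are already computed:

    struct address_space *mapping = filp->f_mapping;
    struct file_ra_state *ra = &filp->f_ra;
    struct page *page;

    page = find_get_page(mapping, index);
    if (!page) {
            /* Cache miss: start synchronous readahead, then retry. */
            page_cache_sync_readahead(mapping, ra, filp,
                                      index, last_index - index);
            page = find_get_page(mapping, index);
    } else if (PageReadahead(page)) {
            /* Hit the readahead marker: pipeline the next window. */
            page_cache_async_readahead(mapping, ra, filp, page,
                                       index, last_index - index);
    }
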
truncate.c
26 static void clear_exceptional_entry(struct address_space *mapping, argument
33 if (shmem_mapping(mapping))
36 spin_lock_irq(&mapping->tree_lock);
42 if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
47 mapping->nrshadows--;
56 * protected by mapping->tree_lock.
61 __radix_tree_delete_node(&mapping->page_tree, node);
63 spin_unlock_irq(&mapping->tree_lock);
86 invalidatepage = page->mapping->a_ops->invalidatepage;
112 struct address_space *mapping local
135 truncate_complete_page(struct address_space *mapping, struct page *page) argument
159 invalidate_complete_page(struct address_space *mapping, struct page *page) argument
174 truncate_inode_page(struct address_space *mapping, struct page *page) argument
187 generic_error_remove_page(struct address_space *mapping, struct page *page) argument
209 struct address_space *mapping = page_mapping(page); local
243 truncate_inode_pages_range(struct address_space *mapping, loff_t lstart, loff_t lend) argument
413 truncate_inode_pages(struct address_space *mapping, loff_t lstart) argument
428 truncate_inode_pages_final(struct address_space *mapping) argument
479 invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end) argument
536 invalidate_complete_page2(struct address_space *mapping, struct page *page) argument
562 do_launder_page(struct address_space *mapping, struct page *page) argument
582 invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end) argument
668 invalidate_inode_pages2(struct address_space *mapping) argument
691 struct address_space *mapping = inode->i_mapping; local
799 struct address_space *mapping = inode->i_mapping; local
[all...]
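
The truncate.c hits lay out the teardown API in layers: truncate_inode_pages_range() (line 243) is the workhorse, truncate_inode_pages() and truncate_inode_pages_final() (lines 413/428) are the setattr- and eviction-time wrappers, and the invalidate_* family drops cached pages without touching on-disk state. Typical filesystem-side calls, sketched (newsize is a hypothetical new file size):

    /* Shrink: drop everything cached past the new EOF. */
    truncate_inode_pages(mapping, newsize);

    /* In ->evict_inode(): final, unconditional teardown. */
    truncate_inode_pages_final(mapping);

    /* Best-effort cache drop (e.g. drop_caches): clean pages only. */
    invalidate_mapping_pages(mapping, 0, -1);
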
util.c
298 struct address_space *mapping = page->mapping; local
308 mapping = swap_address_space(entry);
309 } else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
310 mapping = NULL;
311 return mapping;
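
This util.c hit is page_mapping() itself, and the snippet contains the complete decode logic: swapcache pages derive their mapping from the swap entry, while anonymous pages flag page->mapping with PAGE_MAPPING_ANON and report NULL. Reassembled from the printed lines (the swp_entry_t plumbing they truncate filled in; unrelated slab checks omitted):

    struct address_space *page_mapping(struct page *page)
    {
            struct address_space *mapping = page->mapping;

            if (unlikely(PageSwapCache(page))) {
                    swp_entry_t entry;

                    entry.val = page_private(page);
                    mapping = swap_address_space(entry);
            } else if ((unsigned long)mapping & PAGE_MAPPING_ANON) {
                    /* page->mapping points at an anon_vma, not a file. */
                    mapping = NULL;
            }
            return mapping;
    }
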
internal.h
25 extern int __do_page_cache_readahead(struct address_space *mapping,
33 struct address_space *mapping, struct file *filp)
35 return __do_page_cache_readahead(mapping, filp,
73 * lock while the pte (or pmd_trans_huge) is still mapping the page.
32 ra_submit(struct file_ra_state *ra, struct address_space *mapping, struct file *filp) argument
swap.c
1051 * @mapping: The address_space to search
1057 * to @nr_entries pages and shadow entries in the mapping. All
1061 * The search returns a group of mapping-contiguous entries with
1069 struct address_space *mapping,
1073 pvec->nr = find_get_entries(mapping, start, nr_pages,
1102 * @mapping: The address_space to search
1107 * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a
1110 * The search returns a group of mapping-contiguous pages with ascending
1115 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, argument
1118 pvec->nr = find_get_pages(mapping, star
1068 pagevec_lookup_entries(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages, pgoff_t *indices) argument
1123 pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, int tag, unsigned nr_pages) argument
[all...]
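
The swap.c hits here are the pagevec lookup helpers, whose full signatures are printed at lines 1068/1115/1123. The usage pattern is the same everywhere in mm: fill a pagevec, process it, release it. A sketch:

    /* Visit every cached page of a mapping, a pagevec at a time. */
    struct pagevec pvec;
    pgoff_t index = 0;
    int i;

    pagevec_init(&pvec, 0);
    while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
            for (i = 0; i < pagevec_count(&pvec); i++) {
                    struct page *page = pvec.pages[i];

                    index = page->index + 1;  /* resume point */
                    /* ... operate on page ... */
            }
            pagevec_release(&pvec);  /* drop the elevated refcounts */
    }
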
filemap.c
68 * ->mapping->tree_lock
76 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
86 * ->mapping->tree_lock (__sync_single_inode)
112 static void page_cache_tree_delete(struct address_space *mapping, argument
123 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
126 mapping->nrshadows++;
135 mapping->nrpages--;
139 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
149 radix_tree_tag_clear(&mapping->page_tree, index, tag);
158 if (__radix_tree_delete_node(&mapping
182 struct address_space *mapping = page->mapping; local
228 struct address_space *mapping = page->mapping; local
244 filemap_check_errors(struct address_space *mapping) argument
272 __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode) argument
290 __filemap_fdatawrite(struct address_space *mapping, int sync_mode) argument
296 filemap_fdatawrite(struct address_space *mapping) argument
302 filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end) argument
316 filemap_flush(struct address_space *mapping) argument
331 filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, loff_t end_byte) argument
380 filemap_fdatawait(struct address_space *mapping) argument
391 filemap_write_and_wait(struct address_space *mapping) argument
426 filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend) argument
473 struct address_space *mapping = old->mapping; local
503 page_cache_tree_insert(struct address_space *mapping, struct page *page, void **shadowp) argument
545 __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask, void **shadowp) argument
606 add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) argument
614 add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) argument
891 page_cache_next_hole(struct address_space *mapping, pgoff_t index, unsigned long max_scan) argument
932 page_cache_prev_hole(struct address_space *mapping, pgoff_t index, unsigned long max_scan) argument
965 find_get_entry(struct address_space *mapping, pgoff_t offset) argument
1024 find_lock_entry(struct address_space *mapping, pgoff_t offset) argument
1070 pagecache_get_page(struct address_space *mapping, pgoff_t offset, int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask) argument
1161 find_get_entries(struct address_space *mapping, pgoff_t start, unsigned int nr_entries, struct page **entries, pgoff_t *indices) argument
1224 find_get_pages(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages) argument
1291 find_get_pages_contig(struct address_space *mapping, pgoff_t index, unsigned int nr_pages, struct page **pages) argument
1367 find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages) argument
1470 struct address_space *mapping = filp->f_mapping; local
1704 struct address_space *mapping = file->f_mapping; local
1755 struct address_space *mapping = file->f_mapping; local
1789 struct address_space *mapping = file->f_mapping; local
1834 struct address_space *mapping = file->f_mapping; local
1874 struct address_space *mapping = file->f_mapping; local
1996 struct address_space *mapping = file->f_mapping; local
2101 struct address_space *mapping = file->f_mapping; local
2145 __read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *, struct page *), void *data, gfp_t gfp) argument
2178 do_read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *, struct page *), void *data, gfp_t gfp) argument
2231 read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *, struct page *), void *data) argument
2251 read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) argument
2343 pagecache_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) argument
2354 pagecache_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
2368 struct address_space *mapping = file->f_mapping; local
2436 grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags) argument
2458 struct address_space *mapping = file->f_mapping; local
2564 struct address_space * mapping = file->f_mapping; local
2697 struct address_space * const mapping = page->mapping; local
[all...]
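
filemap.c dominates the results, and its hits split into the lock-ordering documentation (lines 68-86), the radix-tree bookkeeping (page_cache_tree_delete/insert), and the exported page-cache API whose signatures are all printed above. A sketch combining two of the most-used entry points in a hypothetical helper:

    static struct page *myfs_get_stable_page(struct address_space *mapping,
                                             pgoff_t index, loff_t pos,
                                             size_t count,
                                             int (*filler)(void *, struct page *),
                                             void *data)
    {
            int err;

            /* Flush and wait on the byte range, e.g. before direct I/O. */
            err = filemap_write_and_wait_range(mapping, pos,
                                               pos + count - 1);
            if (err)
                    return ERR_PTR(err);

            /* Find-or-create the page, bringing it uptodate via filler. */
            return read_cache_page(mapping, index, filler, data);
    }
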
memory-failure.c
81 struct address_space *mapping; local
94 mapping = page_mapping(p);
95 if (mapping == NULL || mapping->host == NULL)
98 dev = mapping->host->i_sb->s_dev;
467 struct address_space *mapping = page->mapping; local
469 mutex_lock(&mapping->i_mmap_mutex);
477 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
491 mutex_unlock(&mapping
586 struct address_space *mapping; local
649 struct address_space *mapping = page_mapping(p); local
891 struct address_space *mapping; local
[all...]
migrate.c
195 struct address_space *mapping, void *arg)
203 &mapping->i_mmap_nonlinear, shared.nonlinear) {
333 * Replace the page in the mapping.
336 * 1 for anonymous pages without a mapping
337 * 2 for pages with a mapping
338 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
340 int migrate_page_move_mapping(struct address_space *mapping, argument
348 if (!mapping) {
349 /* Anonymous page without mapping */
355 spin_lock_irq(&mapping
194 remove_linear_migration_ptes_from_nonlinear(struct page *page, struct address_space *mapping, void *arg) argument
429 migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page) argument
594 migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) argument
618 buffer_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) argument
675 writeout(struct address_space *mapping, struct page *page) argument
716 fallback_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) argument
751 struct address_space *mapping; local
[all...]
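
The comment hit at lines 333-338 is worth keeping in view: migrate_page_move_mapping() verifies the expected page refcount (1 for anonymous pages without a mapping, 2 for pages with a mapping, 3 with PagePrivate set) before switching the radix-tree slot to the new page. Filesystems without private page state can wire the generic helper printed at line 594 straight into their operations; a sketch with a hypothetical aops table:

    static const struct address_space_operations myfs_aops = {
            /* ... readpage, writepage, etc. ... */
            .migratepage = migrate_page,  /* generic move; no fs-private
                                           * state to carry over */
    };

Block-device-backed filesystems that do carry buffer heads use buffer_migrate_page() (line 618) instead.
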
rmap.c
7 * Simple, low overhead reverse mapping scheme.
26 * mapping->i_mmap_mutex
32 * mapping->private_lock (in __set_page_dirty_buffers)
36 * mapping->tree_lock (widely used, in set_page_dirty,
40 * anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon)
139 * This makes sure the memory mapping described by 'vma' has
144 * not we either need to find an adjacent mapping that we
403 * that the anon_vma pointer from page->mapping is valid if there is a
412 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
456 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
863 struct address_space *mapping; local
1409 try_to_unmap_nonlinear(struct page *page, struct address_space *mapping, void *arg) argument
1678 struct address_space *mapping = page->mapping; local
[all...]
memory.c
652 struct address_space *mapping; local
678 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
688 "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
689 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
715 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
716 * special mapping (even if there are underlying and valid "struct pages").
722 * mapping will always honor the rule
730 * as the vma is not a COW mapping; in that case, we know that all ptes are
857 * If it's a COW mapping, write protect it both
866 * If it's a shared mapping, mar
2170 struct address_space *mapping = dirty_page->mapping; local
2371 unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) argument
2976 struct address_space *mapping; local
[all...]
mmap.c
64 /* description of effects of mapping type and prot in current implementation.
139 * mapping. 0 means there is enough memory for the allocation to
238 struct file *file, struct address_space *mapping)
243 mapping_unmap_writable(mapping);
245 flush_dcache_mmap_lock(mapping);
249 vma_interval_tree_remove(vma, &mapping->i_mmap);
250 flush_dcache_mmap_unlock(mapping);
262 struct address_space *mapping = file->f_mapping; local
263 mutex_lock(&mapping->i_mmap_mutex);
264 __remove_shared_vm_struct(vma, file, mapping);
237 __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) argument
644 struct address_space *mapping = file->f_mapping; local
673 struct address_space *mapping = NULL; local
734 struct address_space *mapping = NULL; local
3081 vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) argument
3184 vm_unlock_mapping(struct address_space *mapping) argument
[all...]
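
The mmap.c hits show the maintenance protocol for a file's reverse mapping: every change to the i_mmap interval tree happens under mapping->i_mmap_mutex inside the dcache-flush bracket, as in __remove_shared_vm_struct() (lines 243-250). The unlink step, condensed from those lines:

    mutex_lock(&mapping->i_mmap_mutex);
    flush_dcache_mmap_lock(mapping);
    vma_interval_tree_remove(vma, &mapping->i_mmap);
    flush_dcache_mmap_unlock(mapping);
    mutex_unlock(&mapping->i_mmap_mutex);
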
nommu.c
218 * @vma: memory mapping
709 struct address_space *mapping; local
721 /* add the VMA to the mapping */
723 mapping = vma->vm_file->f_mapping;
725 mutex_lock(&mapping->i_mmap_mutex);
726 flush_dcache_mmap_lock(mapping);
727 vma_interval_tree_insert(vma, &mapping->i_mmap);
728 flush_dcache_mmap_unlock(mapping);
729 mutex_unlock(&mapping->i_mmap_mutex);
777 struct address_space *mapping; local
950 struct address_space *mapping; local
1883 unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) argument
[all...]
page-writeback.c
1338 static void balance_dirty_pages(struct address_space *mapping, argument
1354 struct backing_dev_info *bdi = mapping->backing_dev_info;
1547 struct address_space *mapping = page_mapping(page); local
1549 if (mapping)
1550 balance_dirty_pages_ratelimited(mapping);
1574 * @mapping: address_space which was dirtied
1585 void balance_dirty_pages_ratelimited(struct address_space *mapping) argument
1587 struct backing_dev_info *bdi = mapping->backing_dev_info;
1627 balance_dirty_pages(mapping, current->nr_dirtied);
1785 * @mapping
1800 tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end) argument
1841 write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data) argument
2004 struct address_space *mapping = data; local
2018 generic_writepages(struct address_space *mapping, struct writeback_control *wbc) argument
2036 do_writepages(struct address_space *mapping, struct writeback_control *wbc) argument
2060 struct address_space *mapping = page->mapping; local
2102 account_page_dirtied(struct page *page, struct address_space *mapping) argument
2136 struct address_space *mapping = page_mapping(page); local
2172 struct address_space *mapping = page->mapping; local
2207 struct address_space *mapping = page_mapping(page); local
2273 struct address_space *mapping = page_mapping(page); local
2329 struct address_space *mapping = page_mapping(page); local
2366 struct address_space *mapping = page_mapping(page); local
2412 mapping_tagged(struct address_space *mapping, int tag) argument
2428 struct address_space *mapping = page_mapping(page); local
[all...]
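
Two APIs anchor the page-writeback.c hits: write_cache_pages() (line 1841) drives a filesystem's ->writepages by iterating dirty pages and handing each to a writepage_t callback (after tag_pages_for_writeback(), line 1800, to avoid livelock), and balance_dirty_pages_ratelimited() (line 1585) throttles dirtiers. A sketch of a filesystem hooking the former, with a hypothetical myfs_writepage callback:

    static int myfs_writepages(struct address_space *mapping,
                               struct writeback_control *wbc)
    {
            /* write_cache_pages() handles tagging, page locking and
             * wbc accounting, calling myfs_writepage() on each page. */
            return write_cache_pages(mapping, wbc, myfs_writepage, NULL);
    }

Write paths pair this with balance_dirty_pages_ratelimited(mapping) after dirtying pages, which is exactly what the hit at lines 1547-1550 does after resolving the page's mapping.
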
swapfile.c
1633 struct address_space *mapping = swap_file->f_mapping; local
1636 mapping->a_ops->swap_deactivate(swap_file);
1720 struct address_space *mapping = swap_file->f_mapping; local
1721 struct inode *inode = mapping->host;
1730 if (mapping->a_ops->swap_activate) {
1731 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
1809 struct address_space *mapping; local
1829 mapping = victim->f_mapping;
1833 if (p->swap_file->f_mapping == mapping) {
1925 inode = mapping
2350 struct address_space *mapping; local
[all...]
shmem.c
172 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
258 static int shmem_radix_tree_replace(struct address_space *mapping, argument
266 pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
269 item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
283 static bool shmem_confirm_swap(struct address_space *mapping, argument
289 item = radix_tree_lookup(&mapping->page_tree, index);
298 struct address_space *mapping,
307 page->mapping = mapping;
310 spin_lock_irq(&mapping
297 shmem_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t index, void *expected) argument
334 struct address_space *mapping = page->mapping; local
351 shmem_free_swap(struct address_space *mapping, pgoff_t index, void *radswap) argument
368 shmem_unlock_mapping(struct address_space *mapping) argument
402 struct address_space *mapping = inode->i_mapping; local
620 struct address_space *mapping = info->vfs_inode.i_mapping; local
752 struct address_space *mapping; local
1040 struct address_space *mapping = inode->i_mapping; local
1462 shmem_mapping(struct address_space *mapping) argument
1478 shmem_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) argument
1498 shmem_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
1526 struct address_space *mapping = inode->i_mapping; local
1633 struct address_space *mapping = in->f_mapping; local
1746 shmem_seek_hole_data(struct address_space *mapping, pgoff_t index, pgoff_t end, int whence) argument
1795 struct address_space *mapping = file->f_mapping; local
1838 shmem_tag_pins(struct address_space *mapping) argument
1880 shmem_wait_for_pins(struct address_space *mapping) argument
2068 struct address_space *mapping = file->f_mapping; local
3300 shmem_unlock_mapping(struct address_space *mapping) argument
3447 shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) argument
[all...]
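
Notable among the shmem.c hits: shmem_mapping() (line 1462) is the predicate other mm code (mincore, truncate) uses to detect tmpfs mappings, and shmem_read_mapping_page_gfp() (line 3447) is the helper drivers use to pull pages out of a tmpfs-backed file. Caller-side sketch of the latter:

    struct page *page;

    page = shmem_read_mapping_page_gfp(mapping, index,
                                       mapping_gfp_mask(mapping));
    if (IS_ERR(page))
            return PTR_ERR(page);   /* in a real caller */
    /* ... use the page; page_cache_release() when done ... */
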
vmscan.c
474 * The tricky part is that after writepage we cannot touch the mapping: nothing
476 * that page is locked, the mapping is pinned.
481 static void handle_write_error(struct address_space *mapping, argument
485 if (page_mapping(page) == mapping)
486 mapping_set_error(mapping, error);
506 static pageout_t pageout(struct page *page, struct address_space *mapping, argument
527 if (!mapping) {
530 * page->mapping == NULL while being dirty with clean buffers.
541 if (mapping->a_ops->writepage == NULL)
543 if (!may_write_to_queue(mapping
581 __remove_mapping(struct address_space *mapping, struct page *page, bool reclaimed) argument
665 remove_mapping(struct address_space *mapping, struct page *page) argument
817 struct address_space *mapping; local
869 struct address_space *mapping; local
1261 struct address_space *mapping; local
[all...]
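
The vmscan.c hits sketch reclaim's contract with the mapping. pageout() (line 506) opens with a guard ladder that the truncated snippets still show in order; condensed:

    if (!mapping)
            return PAGE_KEEP;         /* e.g. dirty orphaned page */
    if (mapping->a_ops->writepage == NULL)
            return PAGE_ACTIVATE;     /* cannot clean it from reclaim */
    if (!may_write_to_queue(mapping->backing_dev_info, sc))
            return PAGE_KEEP;         /* backing device congested */

Only past these guards does reclaim call ->writepage(); and as the comment at lines 474-476 warns, once writepage returns, the mapping must not be touched again unless the page is still held and locked, since nothing else pins it.
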

Completed in 237 milliseconds
