/mm/
readahead.c
  153: pgoff_t offset, unsigned long nr_to_read,
  173: pgoff_t page_offset = offset + page_idx;
  211: pgoff_t offset, unsigned long nr_to_read)
  225: offset, this_chunk, 0);
  229: offset += this_chunk;
  323: * Count contiguously cached pages from @offset-1 to @offset-@max,
  329: pgoff_t offset, unsigned long max)
  334: head = page_cache_prev_hole(mapping, offset - 1, max);
  337: return offset ...
  152: __do_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read, unsigned long lookahead_size) [argument]
  210: force_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read) [argument]
  328: count_history_pages(struct address_space *mapping, pgoff_t offset, unsigned long max) [argument]
  343: try_context_readahead(struct address_space *mapping, struct file_ra_state *ra, pgoff_t offset, unsigned long req_size, unsigned long max) [argument]
  378: ondemand_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, bool hit_readahead_marker, pgoff_t offset, unsigned long req_size) [argument]
  489: page_cache_sync_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, pgoff_t offset, unsigned long req_size) [argument]
  524: page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, struct page *page, pgoff_t offset, unsigned long req_size) [argument]
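The force_page_cache_readahead() fragments above (lines 211, 225, 229) show a large request being split into bounded chunks. A minimal user-space sketch of that loop, assuming a 2 MB chunk cap and a hypothetical do_chunk() in place of the real __do_page_cache_readahead() call:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define MAX_CHUNK (2 * 1024 * 1024 / PAGE_SIZE)  /* assumed 2 MB cap, in pages */

    /* Hypothetical stand-in for the low-level readahead call. */
    static void do_chunk(unsigned long offset, unsigned long nr)
    {
            printf("readahead pages [%lu, %lu)\n", offset, offset + nr);
    }

    static void force_readahead(unsigned long offset, unsigned long nr_to_read)
    {
            while (nr_to_read) {
                    unsigned long this_chunk = nr_to_read < MAX_CHUNK ?
                                               nr_to_read : MAX_CHUNK;

                    do_chunk(offset, this_chunk);  /* bounded per call */
                    offset += this_chunk;
                    nr_to_read -= this_chunk;
            }
    }

    int main(void)
    {
            force_readahead(100, 1200);  /* 1200 pages -> three bounded calls */
            return 0;
    }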
frontswap.c
  192: pgoff_t offset)
  197: ret = test_bit(offset, sis->frontswap_map);
  203: pgoff_t offset)
  205: clear_bit(offset, sis->frontswap_map);
  211: * swaptype and offset. Page must be locked and in the swap cache.
  213: * offset, the frontswap implementation may either overwrite the data and
  222: pgoff_t offset = swp_offset(entry); [local]
  233: if (__frontswap_test(sis, offset))
  235: ret = frontswap_ops->store(type, offset, page);
  237: set_bit(offset, si...
  191: __frontswap_test(struct swap_info_struct *sis, pgoff_t offset) [argument]
  202: __frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) [argument]
  270: pgoff_t offset = swp_offset(entry); [local]
  294: __frontswap_invalidate_page(unsigned type, pgoff_t offset) [argument]
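Lines 197, 205, and 237 show one bit per swap offset tracking whether frontswap holds that page. A self-contained sketch of the same bitmap pattern, with plain C stand-ins for the kernel's test_bit()/set_bit()/clear_bit():

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static int test_bit(unsigned long nr, const unsigned long *map)
    {
            return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
    }
    static void set_bit(unsigned long nr, unsigned long *map)
    {
            map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }
    static void clear_bit(unsigned long nr, unsigned long *map)
    {
            map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
    }

    int main(void)
    {
            unsigned long map[4] = { 0 };  /* covers swap offsets 0..255 */

            set_bit(42, map);              /* page stored at offset 42 */
            printf("offset 42 present: %d\n", test_bit(42, map));
            clear_bit(42, map);            /* page invalidated */
            printf("offset 42 present: %d\n", test_bit(42, map));
            return 0;
    }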
fadvise.c
  28: SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
  70: endbyte = offset + len;
  98: start_index = offset >> PAGE_CACHE_SHIFT;
  117: __filemap_fdatawrite_range(mapping, offset, endbyte,
  121: start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
  151: SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
  153: return sys_fadvise64_64(fd, offset, len, advice);
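Lines 98 and 121 convert a byte offset to a page index, rounding down in one place but up in the other so a partially covered first page is not dropped by invalidation. A small sketch of that arithmetic, assuming 4 KB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long long offset = 5000, len = 20000;
            unsigned long long endbyte = offset + len;

            /* Round the start index down (covers the partial page) ... */
            unsigned long long down = offset >> PAGE_SHIFT;
            /* ... or up (skips the partial page entirely). */
            unsigned long long up = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

            printf("bytes [%llu, %llu) -> start index %llu (down) / %llu (up), end index %llu\n",
                   offset, endbyte, down, up, endbyte >> PAGE_SHIFT);
            return 0;
    }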
dmapool.c
  61: unsigned int offset; [member in struct:dma_page]
  208: unsigned int offset = 0; [local]
  212: unsigned int next = offset + pool->size;
  217: *(int *)(page->vaddr + offset) = next;
  218: offset = next;
  219: } while (offset < pool->allocation);
  237: page->offset = 0;
  323: size_t offset; [local]
  330: if (page->offset < pool->allocation)
  346: offset ...
  408: unsigned int offset; [local]
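Lines 208-219 chain the blocks of a fresh page into a free list by storing each block's successor offset inside the block itself, so no separate list nodes are needed. A user-space sketch of that scheme, with made-up pool sizes:

    #include <stdio.h>
    #include <stdlib.h>

    #define ALLOCATION 4096u  /* one backing page (assumed size) */
    #define BLOCK_SIZE 256u   /* pool object size (assumed) */

    int main(void)
    {
            char *vaddr = malloc(ALLOCATION);
            unsigned int offset = 0, next;

            /* Chain every block to its successor, as when a page is created. */
            do {
                    next = offset + BLOCK_SIZE;
                    *(unsigned int *)(vaddr + offset) = next;
                    offset = next;
            } while (offset < ALLOCATION);

            /* Allocate: pop the head; the value stored in the block
             * becomes the new head of the free list. */
            unsigned int head = 0;
            void *obj = vaddr + head;
            head = *(unsigned int *)obj;
            printf("allocated block at offset %u, next free at %u\n",
                   (unsigned int)((char *)obj - vaddr), head);

            free(vaddr);
            return 0;
    }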
early_ioremap.c
  99: unsigned long offset; [local]
  128: offset = phys_addr & ~PAGE_MASK;
  153: __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);
  155: prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
  162: unsigned long offset; [local]
  191: offset = virt_addr & ~PAGE_MASK;
  192: nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
zswap.c
  175: * offset - the swap offset for the entry. Index into the red-black tree.
  182: pgoff_t offset; [member in struct:zswap_entry]
  239: static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset) [argument]
  246: if (entry->offset > offset)
  248: else if (entry->offset < offset)
  257: * In the case that a entry with the same offset is found, a pointer to
  269: if (myentry->offset > entr...
  325: zswap_entry_find_get(struct rb_root *root, pgoff_t offset) [argument]
  538: pgoff_t offset; [local]
  640: zswap_frontswap_store(unsigned type, pgoff_t offset, struct page *page) [argument]
  739: zswap_frontswap_load(unsigned type, pgoff_t offset, struct page *page) [argument]
  777: zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset) [argument]
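zswap_rb_search() at lines 239-248 walks a tree ordered by swap offset. A sketch of the same lookup using a plain binary search tree in place of the kernel's red-black tree; the struct and names here are illustrative:

    #include <stdio.h>

    struct entry {
            unsigned long offset;         /* the search key */
            struct entry *left, *right;
    };

    static struct entry *search(struct entry *node, unsigned long offset)
    {
            while (node) {
                    if (node->offset > offset)
                            node = node->left;
                    else if (node->offset < offset)
                            node = node->right;
                    else
                            return node;  /* exact offset match */
            }
            return NULL;
    }

    int main(void)
    {
            struct entry a = { 10, NULL, NULL }, b = { 30, NULL, NULL };
            struct entry root = { 20, &a, &b };

            printf("offset 30 %s\n", search(&root, 30) ? "found" : "missing");
            printf("offset 25 %s\n", search(&root, 25) ? "found" : "missing");
            return 0;
    }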
swapfile.c
  57: static const char Bad_offset[] = "Bad swap offset entry ";
  58: static const char Unused_offset[] = "Unused swap offset entry ";
  96: __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) [argument]
  98: swp_entry_t entry = swp_entry(si->type, offset);
  172: pgoff_t offset = start_page - se->start_page; [local]
  173: sector_t start_block = se->start_block + offset;
  174: sector_t nr_blocks = se->nr_pages - offset;
  415: unsigned long offset)
  420: offset /= SWAPFILE_CLUSTER;
  422: offset !...
  414: scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, unsigned long offset) [argument]
  437: scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, unsigned long *offset, unsigned long *scan_base) [argument]
  490: unsigned long offset; [local]
  642: pgoff_t offset; [local]
  707: pgoff_t offset; [local]
  728: unsigned long offset, type; [local]
  764: unsigned long offset = swp_offset(entry); [local]
  988: swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) [argument]
  1034: swapdev_block(int type, pgoff_t offset) [argument]
  1584: pgoff_t offset; [local]
  2595: unsigned long offset, type; [local]
  2747: pgoff_t offset; [local]
  2842: swap_count_continued(struct swap_info_struct *si, pgoff_t offset, unsigned char count) [argument]
  2925: pgoff_t offset; [local]
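Lines 98 and 764 round-trip a (type, offset) pair through a single swp_entry_t word. A sketch of that packing; the bit split used here is an assumption for illustration, not the kernel's exact layout:

    #include <stdio.h>

    #define SWP_TYPE_SHIFT 58  /* assumed: top bits hold the device type */

    typedef unsigned long long swp_entry_t;

    static swp_entry_t swp_entry(unsigned type, unsigned long long offset)
    {
            return ((swp_entry_t)type << SWP_TYPE_SHIFT) | offset;
    }
    static unsigned swp_type(swp_entry_t e)
    {
            return (unsigned)(e >> SWP_TYPE_SHIFT);
    }
    static unsigned long long swp_offset(swp_entry_t e)
    {
            return e & ((1ULL << SWP_TYPE_SHIFT) - 1);
    }

    int main(void)
    {
            swp_entry_t e = swp_entry(3, 123456);  /* pack ... */
            printf("type=%u offset=%llu\n",        /* ... and unpack */
                   swp_type(e), swp_offset(e));
            return 0;
    }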
swap_state.c
  394: static unsigned long swapin_nr_pages(unsigned long offset) [argument]
  413: * stuck here forever, so check for an adjacent offset instead
  416: if (offset != prev_offset + 1 && offset != prev_offset - 1)
  418: prev_offset = offset;
  462: unsigned long offset = entry_offset; [local]
  467: mask = swapin_nr_pages(offset) - 1;
  471: /* Read a page_cluster sized and aligned cluster around offset. */
  472: start_offset = offset & ~mask;
  473: end_offset = offset | mas...
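Lines 467-473 compute an aligned cluster around the faulting offset with mask arithmetic. A sketch of that computation, assuming a power-of-two window:

    #include <stdio.h>

    int main(void)
    {
            unsigned long offset = 1234;
            unsigned long nr_pages = 8;            /* must be a power of two */
            unsigned long mask = nr_pages - 1;

            unsigned long start = offset & ~mask;  /* round down to cluster */
            unsigned long end   = offset | mask;   /* last offset in cluster */

            printf("offset %lu -> read cluster [%lu, %lu]\n",
                   offset, start, end);
            return 0;
    }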
iov_iter.c
  97: static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, [argument]
  119: from = kaddr + offset;
  142: offset = from - kaddr;
  149: from = kaddr + offset;
  178: static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes, [argument]
  200: to = kaddr + offset;
  223: offset = to - kaddr;
  230: to = kaddr + offset;
  331: struct iov_iter *i, unsigned long offset, size_t bytes)
  340: left = __copy_from_user_inatomic(kaddr + offset, bu...
  330: copy_from_user_atomic_iovec(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes) [argument]
  450: size_t offset = i->iov_offset; [local]
  478: size_t offset = i->iov_offset; [local]
  513: size_t offset = i->iov_offset; [local]
  537: memcpy_from_page(char *to, struct page *page, size_t offset, size_t len) [argument]
  544: memcpy_to_page(struct page *page, size_t offset, char *from, size_t len) [argument]
  551: memzero_page(struct page *page, size_t offset, size_t len) [argument]
  639: copy_page_to_iter_bvec(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) [argument]
  648: copy_page_from_iter_bvec(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) [argument]
  694: copy_from_user_bvec(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes) [argument]
  817: size_t offset = i->iov_offset; [local]
  839: copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) [argument]
  849: copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) [argument]
  887: iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes) [argument]
filemap_xip.c
  61: unsigned long offset; [local]
  69: offset = pos & ~PAGE_CACHE_MASK;
  88: if (nr <= offset) {
  92: nr = nr - offset;
  123: left = __copy_to_user(buf+copied, xip_mem+offset, nr);
  133: offset += (nr - left);
  134: index += offset >> PAGE_CACHE_SHIFT;
  135: offset &= ~PAGE_CACHE_MASK;
  338: unsigned long offset; [local]
  343: offset ...
  453: unsigned offset = from & (PAGE_CACHE_SIZE-1); [local]
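Lines 133-135 advance a (page index, in-page offset) cursor, carrying whole pages from the offset into the index. A sketch of that carry, assuming 4 KB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long index = 7, offset = 3000;
            unsigned long copied = 2500;     /* bytes just consumed */

            offset += copied;                /* may now exceed one page */
            index  += offset >> PAGE_SHIFT;  /* carry whole pages over */
            offset &= ~PAGE_MASK;            /* keep the in-page remainder */

            printf("now at page %lu, offset %lu\n", index, offset);
            return 0;
    }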
internal.h
  26: struct file *filp, pgoff_t offset, unsigned long nr_to_read,
  282: * Return the mem_map entry representing the 'offset' subpage within
  286: static inline struct page *mem_map_offset(struct page *base, int offset) [argument]
  288: if (unlikely(offset >= MAX_ORDER_NR_PAGES))
  289: return nth_page(base, offset);
  290: return base + offset;
  298: struct page *base, int offset)
  300: if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
  301: unsigned long pfn = page_to_pfn(base) + offset;
  297: mem_map_next(struct page *iter, struct page *base, int offset) [argument]
filemap.c
  117: unsigned int offset; [local]
  146: offset = index & RADIX_TREE_MAP_MASK;
  148: if (test_bit(offset, node->tags[tag]))
  260: * @start: offset in bytes where the range starts
  261: * @end: offset in bytes where the range ends (inclusive)
  325: * @start_byte: offset in bytes where the range starts
  326: * @end_byte: offset in bytes where the range ends (inclusive)
  418: * @lstart: offset in bytes where the range starts
  419: * @lend: offset in bytes where the range ends (inclusive)
  476: pgoff_t offset ... [local]
  545: __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask, void **shadowp) [argument]
  606: add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) [argument]
  614: add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) [argument]
  965: find_get_entry(struct address_space *mapping, pgoff_t offset) [argument]
  1024: find_lock_entry(struct address_space *mapping, pgoff_t offset) [argument]
  1070: pagecache_get_page(struct address_space *mapping, pgoff_t offset, int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask) [argument]
  1476: unsigned long offset; /* offset into pagecache page */ [local]
  1753: page_cache_read(struct file *file, pgoff_t offset) [argument]
  1783: do_sync_mmap_readahead(struct vm_area_struct *vma, struct file_ra_state *ra, struct file *file, pgoff_t offset) [argument]
  1828: do_async_mmap_readahead(struct vm_area_struct *vma, struct file_ra_state *ra, struct file *file, struct page *page, pgoff_t offset) [argument]
  1877: pgoff_t offset = vmf->pgoff; [local]
  2472: unsigned long offset; /* Offset into pagecache page */ [local]
process_vm_access.c
  27: * @start_offset: offset in page to start copying from/to
  34: unsigned offset,
  42: size_t copy = PAGE_SIZE - offset;
  49: copied = copy_page_from_iter(page, offset, copy, iter);
  52: copied = copy_page_to_iter(page, offset, copy, iter);
  57: offset = 0;
  33: process_vm_rw_pages(struct page **pages, unsigned offset, size_t len, struct iov_iter *iter, int vm_write) [argument]
util.c
  276: unsigned long flag, unsigned long offset)
  278: if (unlikely(offset + PAGE_ALIGN(len) < offset))
  280: if (unlikely(offset & ~PAGE_MASK))
  283: return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
  274: vm_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) [argument]
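Lines 278-283 reject a byte offset whose page-aligned end wraps around, require page alignment, and only then convert to a page number. A sketch of those checks, assuming 4 KB pages; check_mmap_offset() is a hypothetical wrapper, not kernel API:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    static int check_mmap_offset(unsigned long offset, unsigned long len,
                                 unsigned long *pgoff)
    {
            if (offset + PAGE_ALIGN(len) < offset)  /* end wrapped past 0? */
                    return -1;
            if (offset & ~PAGE_MASK)                /* not page aligned? */
                    return -1;
            *pgoff = offset >> PAGE_SHIFT;          /* bytes -> page number */
            return 0;
    }

    int main(void)
    {
            unsigned long pgoff;

            printf("aligned:   %d\n", check_mmap_offset(8192, 100, &pgoff));
            printf("unaligned: %d\n", check_mmap_offset(8193, 100, &pgoff));
            printf("overflow:  %d\n", check_mmap_offset(-4096UL, 8192, &pgoff));
            return 0;
    }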
page_cgroup.c
  27: unsigned long offset; [local]
  41: offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
  42: return base + offset;
  384: pgoff_t offset = swp_offset(ent); [local]
  393: mappage = ctrl->map[offset / SC_PER_PAGE];
  395: return sc + offset % SC_PER_PAGE;
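Lines 393-395 locate a record by dividing the offset into a page selector and an in-page slot. A sketch of that two-level lookup; SC_PER_PAGE's value and the record type here are made up:

    #include <stdio.h>
    #include <stdlib.h>

    #define SC_PER_PAGE 512  /* records per backing page (assumed) */

    int main(void)
    {
            /* Two backing pages covering offsets 0..1023. */
            int *map[2];
            map[0] = calloc(SC_PER_PAGE, sizeof(int));
            map[1] = calloc(SC_PER_PAGE, sizeof(int));

            unsigned long offset = 700;
            /* Divide picks the page, remainder picks the slot within it. */
            int *rec = map[offset / SC_PER_PAGE] + offset % SC_PER_PAGE;
            *rec = 42;

            printf("record for offset %lu lives in page %lu, slot %lu\n",
                   offset, offset / SC_PER_PAGE, offset % SC_PER_PAGE);

            free(map[0]);
            free(map[1]);
            return 0;
    }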
shmem.c
  93: pgoff_t next; /* the next page offset to be fallocated */
  1528: unsigned long offset; [local]
  1543: offset = *ppos & ~PAGE_CACHE_MASK;
  1556: if (nr <= offset)
  1578: if (nr <= offset) {
  1584: nr -= offset;
  1597: if (!offset)
  1608: ret = copy_page_to_iter(page, offset, nr, to);
  1610: offset += ret;
  1611: index += offset >> PAGE_CACHE_SHIF...
  1793: shmem_file_llseek(struct file *file, loff_t offset, int whence) [argument]
  2052: shmem_fallocate(struct file *file, int mode, loff_t offset, loff_t len) [argument]
page_io.c
  119: unsigned long offset; [local]
  122: offset = swp_offset(entry);
  126: offset);
madvise.c
  301: loff_t offset; [local]
  319: offset = (loff_t)(start - vma->vm_start)
  332: offset, end - start);
vmalloc.c
  965: unsigned long offset; [local]
  977: offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
  988: BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
  1882: unsigned long offset, length; [local]
  1884: offset = (unsigned long)addr & ~PAGE_MASK;
  1885: length = PAGE_SIZE - offset;
  1902: memcpy(buf, map + offset, length);
  1921: unsigned long offset, length; [local]
  1923: offset = (unsigned long)addr & ~PAGE_MASK;
  1924: length = PAGE_SIZE - offset;
slob.c
  78: * or offset of next block if -ve (in SLOB_UNITs).
  80: * Free blocks of size 1 unit simply contain the offset of the next block.
  82: * memory, and the offset of the next free block in the second SLOB_UNIT.
  148: slobidx_t offset = next - base; [local]
  152: s[1].units = offset;
  154: s[0].units = -offset;
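Lines 148-154 encode a free block either as (size, next-offset) in its first two units or, for a one-unit block with no room for both, as the negated next-offset alone. A sketch of that convention, following the comment at lines 78-82; the heap layout and sizes here are illustrative:

    #include <stdio.h>

    typedef short slobidx_t;
    struct slob_unit { slobidx_t units; };

    /* Negative first unit means "negated next offset"; otherwise the
     * next offset sits in the second unit. */
    static slobidx_t slob_next(struct slob_unit *base, slobidx_t cur)
    {
            struct slob_unit *s = base + cur;
            return s[0].units < 0 ? -s[0].units : s[1].units;
    }

    int main(void)
    {
            struct slob_unit heap[16] = { { 0 } };

            heap[0].units = 4;    /* 4-unit free block at offset 0 ... */
            heap[1].units = 9;    /* ... next free block at offset 9 */
            heap[9].units = -12;  /* 1-unit free block: next offset, negated */

            printf("after block 0 comes offset %d\n", slob_next(heap, 0));
            printf("after block 9 comes offset %d\n", slob_next(heap, 9));
            return 0;
    }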
truncate.c
  69: * @offset: start of the range to invalidate
  76: * ensure that no dirty buffer is left outside @offset and that no I/O
  81: void do_invalidatepage(struct page *page, unsigned int offset, [argument]
  92: (*invalidatepage)(page, offset, length);
  222: * @lstart: offset from which to truncate
  223: * @lend: offset to which to truncate (inclusive)
  237: * recently touched, and freeing happens in ascending file offset order.
  402: * truncate_inode_pages - truncate *all* the pages from an offset
  404: * @lstart: offset from which to truncate
  469: * @start: the offset 'fro...
slub.c
  246: return *(void **)(object + s->offset);
  251: prefetch(object + s->offset);
  259: probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
  268: *(void **)(object + s->offset) = fp;
  484: if (s->offset)
  485: p = object + s->offset + sizeof(void *);
  605: pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
  617: if (s->offset)
  618: off = s->offset + sizeof(void *);
  739: if (s->offset)
  1467: int offset = (PAGE_SIZE << order) - s->reserved; [local]
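Lines 246 and 268 read and write the next-free pointer stored at s->offset inside each free object. A sketch of such an embedded freelist; the object size and pointer offset are made up:

    #include <stdio.h>
    #include <stdlib.h>

    #define OBJ_SIZE  64
    #define FP_OFFSET 0  /* where the free pointer lives in an object */

    static void *get_freepointer(void *object)
    {
            return *(void **)((char *)object + FP_OFFSET);
    }
    static void set_freepointer(void *object, void *fp)
    {
            *(void **)((char *)object + FP_OFFSET) = fp;
    }

    int main(void)
    {
            char *slab = malloc(4 * OBJ_SIZE);  /* stand-in for a slab page */
            void *freelist = NULL;

            /* Thread all objects onto the freelist. */
            for (int i = 3; i >= 0; i--) {
                    set_freepointer(slab + i * OBJ_SIZE, freelist);
                    freelist = slab + i * OBJ_SIZE;
            }

            void *obj = freelist;             /* allocation pops the head... */
            freelist = get_freepointer(obj);  /* ...and follows the pointer */
            printf("allocated %p, next free %p\n", obj, freelist);

            free(slab);
            return 0;
    }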
slab.c
  447: * We want to avoid an expensive divide : (offset / cache->size)
  449: * we can replace (offset / cache->size) by
  450: * reciprocal_divide(offset, cache->reciprocal_buffer_size)
  455: u32 offset = (obj - page->s_mem); [local]
  456: return reciprocal_divide(offset, cache->reciprocal_buffer_size);
  1701: static void dump_line(char *data, int offset, int limit) [argument]
  1707: printk(KERN_ERR "%03x: ", offset);
  1709: if (data[offset + i] != POISON_FREE) {
  1710: error = data[offset + i];
  1715: &data[offset], limi...
  2585: size_t offset; [local]
  4102: unsigned long offset, size; [local]
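Lines 447-456 replace a divide by the object size with a multiply-and-shift against a precomputed reciprocal. A sketch of that trick; this mirrors the idea rather than the kernel's exact reciprocal_value(), and exactness relies on the dividend staying small (here, within a page):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t reciprocal_value(uint32_t d)
    {
            return (uint32_t)(((1ULL << 32) + d - 1) / d);  /* ceil(2^32 / d) */
    }
    static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
    {
            return (uint32_t)(((uint64_t)a * r) >> 32);     /* a / d, no div */
    }

    int main(void)
    {
            uint32_t size = 192;          /* object size in bytes (assumed) */
            uint32_t r = reciprocal_value(size);
            uint32_t offset = 192 * 17;   /* byte offset of the 18th object */

            printf("index = %u (expected 17)\n", reciprocal_divide(offset, r));
            return 0;
    }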
memory.c
  2347: * offset and file offset. So we must perform an exhaustive search
  2892: * if page by the offset is not ready to be mapped (cold cache or
  3549: int offset = addr & (PAGE_SIZE-1); [local]
  3556: memcpy_toio(maddr + offset, buf, len);
  3558: memcpy_fromio(buf, maddr + offset, len);
  3579: int bytes, ret, offset; [local]
  3605: offset = addr & (PAGE_SIZE-1);
  3606: if (bytes > PAGE_SIZE-offset)
  3607: bytes = PAGE_SIZE-offset;
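Lines 3605-3607 clamp each copy chunk to the bytes left in the current page so a cross-page access proceeds page by page. A sketch of that loop, assuming 4 KB pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
            unsigned long addr = 4000;  /* starts near a page boundary */
            int len = 6000;

            while (len > 0) {
                    int offset = addr & (PAGE_SIZE - 1);
                    int bytes = len;

                    if (bytes > PAGE_SIZE - offset)  /* clamp to page end */
                            bytes = PAGE_SIZE - offset;
                    printf("copy %4d bytes at page offset %4d\n",
                           bytes, offset);
                    addr += bytes;
                    len -= bytes;
            }
            return 0;
    }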
nommu.c
  944: /* offset overflow? */
  1525: unsigned long offset; [member in struct:mmap_arg_struct]
  1534: if (a.offset & ~PAGE_MASK)
  1538: a.offset >> PAGE_SHIFT);