Searched refs:len (Results 1 - 25 of 26) sorted by relevance


/mm/
util.c
26 size_t len; local
32 len = strlen(s) + 1;
33 buf = kmalloc_track_caller(len, gfp);
35 memcpy(buf, s, len);
48 size_t len; local
54 len = strnlen(s, max);
55 buf = kmalloc_track_caller(len+1, gfp);
57 memcpy(buf, s, len);
58 buf[len] = '\0';
68 * @len
71 kmemdup(const void *src, size_t len, gfp_t gfp) argument
90 memdup_user(const void __user *src, size_t len) argument
254 vm_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff) argument
274 vm_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) argument
367 unsigned int len; local
[all...]
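
The util.c hits above trace the kstrdup/kstrndup shape: measure the string, allocate length + 1, copy, and NUL-terminate. A minimal userspace sketch of the kstrndup pattern from lines 54-58, using plain malloc in place of kmalloc_track_caller (the gfp argument has no userspace counterpart, so it is dropped here):

#include <stdlib.h>
#include <string.h>

/* Duplicate at most max bytes of s, always NUL-terminating the copy.
 * Mirrors the strnlen + alloc(len + 1) + memcpy + '\0' sequence shown
 * in the mm/util.c hits; returns NULL on allocation failure. */
static char *strndup_sketch(const char *s, size_t max)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;
	len = strnlen(s, max);
	buf = malloc(len + 1);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
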
fadvise.c
28 SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
48 if (!mapping || len < 0) {
70 endbyte = offset + len;
71 if (!len || endbyte < len)
151 SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
153 return sys_fadvise64_64(fd, offset, len, advice);
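
Lines 70-71 of the fadvise.c hits compute endbyte = offset + len and treat endbyte < len as an overflow signal, rejecting a zero len in the same test. A standalone sketch of that guard; the kernel uses signed loff_t, while this sketch uses unsigned arithmetic to keep the overflow well defined, and range_fits is a made-up helper name:

#include <stdbool.h>
#include <stdint.h>

/* Does [offset, offset + len) fit without wrapping, with len > 0?
 * Same idea as the fadvise64_64 check: a wrapped sum is smaller than len. */
static bool range_fits(uint64_t offset, uint64_t len)
{
	uint64_t end = offset + len;

	return len != 0 && end >= len;
}
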
iov_iter.c
452 size_t len; local
457 len = iov->iov_len - offset;
458 if (len > i->count)
459 len = i->count;
460 if (len > maxsize)
461 len = maxsize;
463 len += *start = addr & (PAGE_SIZE - 1);
464 if (len > maxpages * PAGE_SIZE)
465 len = maxpages * PAGE_SIZE;
467 n = (len
480 size_t len; local
521 size_t len = iov->iov_len - offset; local
537 memcpy_from_page(char *to, struct page *page, size_t offset, size_t len) argument
544 memcpy_to_page(struct page *page, size_t offset, char *from, size_t len) argument
551 memzero_page(struct page *page, size_t offset, size_t len) argument
781 size_t len = bvec->bv_len - i->iov_offset; local
799 size_t len = bvec->bv_len - i->iov_offset; local
824 size_t len = bvec->bv_len - offset; local
[all...]
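
The iov_iter.c hits at lines 457-465 clamp len against the remaining iterator count, the caller's maxsize, and maxpages, and line 463 folds the sub-page offset of the address into len before converting bytes to pages. That final conversion in isolation, assuming 4 KiB pages (the kernel uses PAGE_SIZE / PAGE_SHIFT):

#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

/* How many pages does a buffer of len bytes starting at addr touch?
 * As in the hit at line 463, the offset of addr within its first page
 * is added to len, and the sum is rounded up to whole pages. */
static size_t pages_spanned(uintptr_t addr, size_t len, size_t *start)
{
	*start = addr & (SKETCH_PAGE_SIZE - 1);
	len += *start;
	return (len + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;
}
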
msync.c
31 SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
46 len = (len + ~PAGE_MASK) & PAGE_MASK;
47 end = start + len;
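
Line 46 of the msync.c hit rounds len up to a whole number of pages with (len + ~PAGE_MASK) & PAGE_MASK, the usual PAGE_ALIGN idiom, before computing end = start + len. The same arithmetic in runnable form, again assuming 4 KiB pages:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))	/* clears the low 12 bits */

int main(void)
{
	unsigned long len = 5000;

	/* ~SKETCH_PAGE_MASK is 4095, so this rounds up to the next page. */
	len = (len + ~SKETCH_PAGE_MASK) & SKETCH_PAGE_MASK;
	printf("%lu\n", len);	/* prints 8192 */
	return 0;
}
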
mmap.c
53 #define arch_mmap_check(addr, len, flags) (0)
57 #define arch_rebalance_pgtables(addr, len) (addr)
286 static unsigned long do_brk(unsigned long addr, unsigned long len);
1246 unsigned long len)
1252 locked = len >> PAGE_SHIFT;
1267 unsigned long len, unsigned long prot,
1286 if (!len)
1293 len = PAGE_ALIGN(len);
1294 if (!len)
1244 mlock_future_check(struct mm_struct *mm, unsigned long flags, unsigned long len) argument
1266 do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate) argument
1467 unsigned long len; member in struct:mmap_arg_struct
1541 mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) argument
1922 arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) argument
1958 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) argument
2009 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) argument
2534 do_munmap(struct mm_struct *mm, unsigned long start, size_t len) argument
2617 vm_munmap(unsigned long start, size_t len) argument
2650 do_brk(unsigned long addr, unsigned long len) argument
2731 vm_brk(unsigned long addr, unsigned long len) argument
2837 copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks) argument
2988 __install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_operations_struct *ops, void *priv) argument
3036 _install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) argument
3045 install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) argument
[all...]
nommu.c
883 unsigned long len)
886 unsigned long end = addr + len;
915 unsigned long len,
936 if (!len)
940 rlen = PAGE_ALIGN(len);
1149 unsigned long len,
1182 order = get_order(len);
1183 kdebug("alloc order %d for %lx", order, len);
1192 point = len >> PAGE_SHIFT;
1214 region->vm_end = region->vm_start + len;
881 find_vma_exact(struct mm_struct *mm, unsigned long addr, unsigned long len) argument
913 validate_mmap_request(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *_capabilities) argument
1147 do_mmap_private(struct vm_area_struct *vma, struct vm_region *region, unsigned long len, unsigned long capabilities) argument
1261 do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate) argument
1521 unsigned long len; member in struct:mmap_arg_struct
1653 do_munmap(struct mm_struct *mm, unsigned long start, size_t len) argument
1729 vm_munmap(unsigned long addr, size_t len) argument
1770 vm_brk(unsigned long addr, unsigned long len) argument
1852 vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) argument
1877 arch_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) argument
2005 __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
2047 access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
2057 access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) argument
[all...]
process_vm_access.c
28 * @len: number of bytes to copy
35 size_t len,
40 while (len && iov_iter_count(iter)) {
45 if (copy > len)
46 copy = len;
54 len -= copied;
68 * @len: size of area to copy to/from
78 unsigned long len,
93 if (len == 0)
95 nr_pages = (addr + len
33 process_vm_rw_pages(struct page **pages, unsigned offset, size_t len, struct iov_iter *iter, int vm_write) argument
77 process_vm_rw_single_vec(unsigned long addr, unsigned long len, struct iov_iter *iter, struct page **process_pages, struct mm_struct *mm, struct task_struct *task, int vm_write) argument
[all...]
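
The process_vm_rw_pages hits (lines 40-54) show the chunked-copy loop: run while both len and the iterator have bytes left, clamp each chunk to len, and subtract what was actually copied. A userspace sketch of that clamp-and-advance loop over plain buffers (the helper name is illustrative, not kernel API):

#include <stddef.h>
#include <string.h>

/* Copy len bytes from src to dst in fixed-size chunks, clamping the
 * final chunk and advancing by the amount copied, as in the
 * process_vm_access.c loop above. Returns the total copied. */
static size_t copy_in_chunks(char *dst, const char *src, size_t len,
			     size_t chunk)
{
	size_t done = 0;

	while (len) {
		size_t copy = chunk;

		if (copy > len)
			copy = len;
		memcpy(dst + done, src + done, copy);
		done += copy;
		len -= copy;
	}
	return done;
}
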
mlock.c
613 static int do_mlock(unsigned long start, size_t len, int on) argument
620 VM_BUG_ON(len != PAGE_ALIGN(len));
621 end = start + len;
671 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) argument
680 VM_BUG_ON(len != PAGE_ALIGN(len));
681 end = start + len;
727 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
738 len
[all...]
filemap_xip.c
56 size_t len,
93 if (nr > len - copied)
94 nr = len - copied;
136 } while (copied < len);
147 xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) argument
149 if (!access_ok(VERIFY_WRITE, buf, len))
153 buf, len, ppos);
398 xip_file_write(struct file *filp, const char __user *buf, size_t len, argument
409 if (!access_ok(VERIFY_READ, buf, len)) {
415 count = len;
52 do_xip_mapping_read(struct address_space *mapping, struct file_ra_state *_ra, struct file *filp, char __user *buf, size_t len, loff_t *ppos) argument
[all...]
cleancache.c
187 int len = 0, maxlen = CLEANCACHE_KEY_MAX; local
194 len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
195 if (len <= FILEID_ROOT || len == FILEID_INVALID)
mincore.c
248 * current process's address space specified by [addr, addr + len).
262 * -ENOMEM - Addresses in the range [addr, addr + len] are
268 SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
280 if (!access_ok(VERIFY_READ, (void __user *) start, len))
284 pages = len >> PAGE_SHIFT;
285 pages += (len & ~PAGE_MASK) != 0;
slub.c
4005 int len = 0; local
4039 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4041 len += sprintf(buf + len, "%7ld ", l->count);
4044 len += sprintf(buf + len, "%pS", (void *)l->addr);
4046 len += sprintf(buf + len, "<not-available>");
4049 len += sprintf(buf + len, " ag
4427 int len; local
4716 int len; local
4888 slab_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) argument
[all...]
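
The slub.c hits (lines 4039-4049) build sysfs output by appending with len += sprintf(buf + len, ...), keeping len as the running write offset and stopping once the page-sized buffer is nearly full. A bounded userspace version of that accumulation, using snprintf and %s in place of the kernel-only %pS:

#include <stdio.h>
#include <stddef.h>

/* Append one "count symbol" entry to buf, using len as the running
 * offset, in the style of the slub.c location-listing loop above. */
static int format_entry(char *buf, size_t size, long count, const char *where)
{
	int len = 0;

	len += snprintf(buf + len, size - len, "%7ld ", count);
	if ((size_t)len >= size)
		return len;	/* out of room; stop appending */
	len += snprintf(buf + len, size - len, "%s\n",
			where ? where : "<not-available>");
	return len;
}
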
mprotect.c
335 SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
348 if (!len)
350 len = PAGE_ALIGN(len);
351 end = start + len;
shmem.c
1479 loff_t pos, unsigned len, unsigned flags,
1490 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
1499 loff_t pos, unsigned len, unsigned copied,
1630 struct pipe_inode_info *pipe, size_t len,
1656 if (unlikely(left < len))
1657 len = left;
1664 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1688 if (!len)
1691 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1717 len
1478 shmem_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) argument
1498 shmem_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
1629 shmem_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) argument
2052 shmem_fallocate(struct file *file, int mode, loff_t offset, loff_t len) argument
2424 int len; local
2524 size_t len; local
2560 struct { const char *prefix; size_t len; } arr[] = { member in struct:__anon12
2705 shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, struct inode *parent) argument
2924 long len; local
[all...]
madvise.c
454 * -EINVAL - start + len < 0, start is not page-aligned,
470 size_t len; local
482 len = (len_in + ~PAGE_MASK) & PAGE_MASK;
484 /* Check to see whether len was rounded up from small -ve to zero */
485 if (len_in && !len)
488 end = start + len;
gup.c
944 unsigned long addr, len, end; local
951 len = (unsigned long) nr_pages << PAGE_SHIFT;
952 end = start + len;
955 start, len)))
mremap.c
166 unsigned long new_addr, unsigned long len,
175 old_end = old_addr + len;
230 flush_tlb_range(vma, old_end-len, old_addr);
234 return len + old_addr - old_end; /* how much done */
499 * We allow a zero old-len as a special case
501 * a zero new-len is nonsensical.
164 move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks) argument
readahead.c
574 unsigned long len = end - start + 1; local
575 ret = do_readahead(mapping, f.file, start, len);
zswap.c
646 unsigned int dlen = PAGE_SIZE, len; local
686 len = dlen + sizeof(struct zswap_header);
687 ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
memory.c
1793 * @len: size of area
1802 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) argument
1807 if (start + len < start)
1814 len += start & ~PAGE_MASK;
1816 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
3544 void *buf, int len, int write)
3556 memcpy_toio(maddr + offset, buf, len);
3558 memcpy_fromio(buf, maddr + offset, len);
3561 return len;
3571 unsigned long addr, void *buf, int len, in
3543 generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) argument
3570 __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
3640 access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
3651 access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) argument
[all...]
mempolicy.c
1189 static long do_mbind(unsigned long start, unsigned long len, argument
1210 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1211 end = start + len;
1233 start, start + len, mode, mode_flags,
1352 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1370 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1569 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1590 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
kmemleak.c
294 int i, len, remaining; local
298 remaining = len =
301 seq_printf(seq, " hex dump (first %d bytes):\n", len);
302 for (i = 0; i < len; i += HEX_ROW_SIZE) {
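
The kmemleak.c hit walks the first len bytes of an object in HEX_ROW_SIZE steps to print a hex dump. A compact sketch of that row loop (the 16-byte row width is an assumption taken from kmemleak's own define):

#include <stdio.h>
#include <stddef.h>

#define HEX_ROW_SIZE 16	/* bytes per output row */

/* Print the first len bytes of buf as a hex dump, one row per line,
 * following the loop shape of the kmemleak.c hit above. */
static void hex_dump(const unsigned char *buf, size_t len)
{
	printf("hex dump (first %zu bytes):\n", len);
	for (size_t i = 0; i < len; i += HEX_ROW_SIZE) {
		size_t row = len - i < HEX_ROW_SIZE ? len - i : HEX_ROW_SIZE;

		printf("  ");
		for (size_t j = 0; j < row; j++)
			printf("%02x ", buf[i + j]);
		printf("\n");
	}
}
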
filemap.c
2344 loff_t pos, unsigned len, unsigned flags,
2349 return aops->write_begin(file, mapping, pos, len, flags,
2355 loff_t pos, unsigned len, unsigned copied,
2360 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2343 pagecache_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) argument
2354 pagecache_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
hugetlb.c
1741 unsigned long count, size_t len)
1775 return len;
1783 size_t len)
1795 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1805 struct kobj_attribute *attr, const char *buf, size_t len)
1807 return nr_hugepages_store_common(false, kobj, buf, len);
1824 struct kobj_attribute *attr, const char *buf, size_t len)
1826 return nr_hugepages_store_common(true, kobj, buf, len);
1739 __nr_hugepages_store_common(bool obey_mempolicy, struct hstate *h, int nid, unsigned long count, size_t len) argument
1781 nr_hugepages_store_common(bool obey_mempolicy, struct kobject *kobj, const char *buf, size_t len) argument
1804 nr_hugepages_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) argument
1823 nr_hugepages_mempolicy_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) argument
swapfile.c
2027 int len; local
2035 len = seq_path(swap, &file->f_path, " \t\n\\");
2037 len < 40 ? 40 - len : 1, " ",

