Searched for refs:ret (results 26-45 of 45), sorted by relevance

/mm/
madvise.c
  234  int ret = 0;  (local)
  240  int ret = get_user_pages_fast(start, 1, 0, &p);  (local)
  241  if (ret != 1)
  242  return ret;
  246  ret = soft_offline_page(p, MF_COUNT_INCREASED);
  247  if (ret)
  256  return ret;
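
The madvise.c hits are the usual "call a helper that returns a count, demand exactly one, then propagate the next call's status in ret" shape. A minimal user-space sketch of that idiom follows; fetch_one() and offline_item() are invented stand-ins for get_user_pages_fast() and soft_offline_page(), not kernel code:

    #include <errno.h>
    #include <stdio.h>

    /* Invented stand-in for get_user_pages_fast(): returns the number of
     * items fetched (here always 1) or a negative errno on failure. */
    static int fetch_one(int *out)
    {
        *out = 42;
        return 1;
    }

    /* Invented stand-in for soft_offline_page(): 0 on success, -errno on error. */
    static int offline_item(int item)
    {
        return item < 0 ? -EINVAL : 0;
    }

    static int handle_item(void)
    {
        int item;
        int ret = fetch_one(&item);

        if (ret != 1)           /* anything other than "exactly one" is passed up */
            return ret;

        ret = offline_item(item);
        if (ret)
            fprintf(stderr, "offline failed: %d\n", ret);
        return ret;             /* 0 on success, negative errno otherwise */
    }

    int main(void)
    {
        return handle_item() ? 1 : 0;
    }
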
mmu_notifier.c
  190  int ret;  (local)
  194  ret = -ENOMEM;
  201  ret = mm_take_all_locks(mm);
  202  if (unlikely(ret))
  233  return ret;
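
The mmu_notifier.c registration path presets ret to -ENOMEM, overwrites it with the result of mm_take_all_locks(), and unwinds through goto labels. A hedged user-space analogue, with a hypothetical listener struct and a pthread mutex standing in for the mm locks:

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    /* Illustrative registration object; not a kernel structure. */
    struct listener {
        int id;
    };

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static int register_listener(struct listener **out)
    {
        struct listener *l;
        int ret;

        ret = -ENOMEM;                        /* pessimistic default */
        l = malloc(sizeof(*l));
        if (!l)
            goto out;

        ret = pthread_mutex_lock(&big_lock);  /* 0 on success, errno value on failure */
        if (ret) {
            ret = -ret;                       /* normalise to the negative-errno convention */
            goto out_free;
        }

        l->id = 1;
        *out = l;
        pthread_mutex_unlock(&big_lock);
        return 0;

    out_free:
        free(l);
    out:
        return ret;
    }

    int main(void)
    {
        struct listener *l = NULL;
        int ret = register_listener(&l);

        free(l);                              /* NULL-safe if registration failed early */
        return ret ? 1 : 0;
    }
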
highmem.c
  343  void *ret;  (local)
  350  ret = NULL;
  357  ret = pam->virtual;
  364  return ret;
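
page_address()-style lookup: default ret to NULL, overwrite it on a hit while walking the table, and return whatever survived. A self-contained sketch (the table type is invented for the example):

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative mapping table; the real code walks a hash chain of
     * struct page_address_map entries. */
    struct map_entry {
        int key;
        void *virtual;
    };

    static void *lookup(const struct map_entry *tbl, size_t n, int key)
    {
        void *ret;
        size_t i;

        ret = NULL;                     /* a miss until proven otherwise */
        for (i = 0; i < n; i++) {
            if (tbl[i].key == key) {
                ret = tbl[i].virtual;
                break;
            }
        }
        return ret;
    }

    int main(void)
    {
        int payload = 7;
        struct map_entry tbl[] = { { 3, &payload } };

        printf("hit: %p  miss: %p\n", lookup(tbl, 1, 3), lookup(tbl, 1, 9));
        return 0;
    }
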
compaction.c
  545  int ret;  (local)
  547  ret = compaction_suitable(zone, cc->order);
  548  switch (ret) {
  552  return ret;
  565  while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
  571  ret = COMPACT_PARTIAL;
  606  return ret;
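
compact_zone()-style control flow: switch on a status code from a suitability check, then loop while the completion check keeps answering "continue". The enum below is a made-up stand-in for the COMPACT_* constants:

    #include <stdio.h>

    /* Made-up status values standing in for the COMPACT_* constants. */
    enum status { ST_SKIP, ST_CONTINUE, ST_PARTIAL, ST_COMPLETE };

    static enum status suitable(int order)
    {
        return order > 0 ? ST_CONTINUE : ST_SKIP;
    }

    static enum status finished(int *work_left)
    {
        return (*work_left)-- > 0 ? ST_CONTINUE : ST_COMPLETE;
    }

    static enum status run(int order)
    {
        int work_left = 3;
        enum status ret;

        ret = suitable(order);
        switch (ret) {
        case ST_SKIP:
            return ret;                 /* nothing to do; pass the status up */
        default:
            break;
        }

        while ((ret = finished(&work_left)) == ST_CONTINUE)
            ;                           /* one unit of work per iteration */

        return ret;
    }

    int main(void)
    {
        printf("status = %d\n", run(2));
        return 0;
    }
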
process_vm_access.c
  60   int ret;  (local)
  103  ret = copy_from_user(target_kaddr,
  108  ret = copy_to_user(lvec[*lvec_current].iov_base
  112  if (ret) {
  113  *bytes_copied += bytes_to_copy - ret;
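
Worth noting for the bytes_copied arithmetic: copy_from_user()/copy_to_user() return the number of bytes that could not be copied, so partial progress is credited as bytes_to_copy - ret. A user-space imitation of that convention (try_copy() and its fake 8-byte short copy are invented):

    #include <stdio.h>
    #include <string.h>

    /* Fake copier that mimics the copy_*_user() convention: it returns the
     * number of bytes it could NOT copy (here it pretends to fault after
     * 8 bytes, purely for the demo). */
    static size_t try_copy(void *dst, const void *src, size_t len)
    {
        size_t copied = len > 8 ? 8 : len;

        memcpy(dst, src, copied);
        return len - copied;
    }

    int main(void)
    {
        char src[16] = "0123456789abcde";
        char dst[16] = { 0 };
        size_t bytes_copied = 0;
        size_t bytes_to_copy = sizeof(src);
        size_t ret = try_copy(dst, src, bytes_to_copy);

        if (ret)
            bytes_copied += bytes_to_copy - ret;   /* credit the partial copy */
        else
            bytes_copied += bytes_to_copy;

        printf("copied %zu of %zu bytes\n", bytes_copied, bytes_to_copy);
        return 0;
    }
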
slob.c
  483  void *ret;  (local)
  498  ret = (void *)m + align;
  500  trace_kmalloc_node(_RET_IP_, ret,
  507  ret = slob_new_pages(gfp, order, node);
  508  if (ret) {
  510  page = virt_to_page(ret);
  514  trace_kmalloc_node(_RET_IP_, ret,
  518  kmemleak_alloc(ret, size, 1, gfp);
  519  return ret;
oom_kill.c
  619  int ret = 1;  (local)
  624  ret = 0;
  640  return ret;
  668  int ret = 1;  (local)
  673  ret = 0;
  680  return ret;
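
Both oom_kill.c hits use the "assume 1 (allowed), drop to 0 on the first disqualifier" pattern for an int used as a boolean. A generic sketch with an invented predicate:

    #include <stdio.h>

    /* Invented predicate: "allowed" unless some entry disqualifies it. */
    static int any_candidate_allowed(const int *flags, int n)
    {
        int ret = 1;                    /* assume allowed */
        int i;

        for (i = 0; i < n; i++) {
            if (flags[i] < 0) {         /* first disqualifier wins */
                ret = 0;
                break;
            }
        }
        return ret;
    }

    int main(void)
    {
        int ok[]  = { 1, 2, 3 };
        int bad[] = { 1, -1, 3 };

        printf("%d %d\n", any_candidate_allowed(ok, 3), any_candidate_allowed(bad, 3));
        return 0;
    }
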
vmalloc.c
  180   int ret;  (local)
  182   ret = vmap_page_range_noflush(start, end, prot, pages);
  184   return ret;
  1763  void *ret;  (local)
  1765  ret = __vmalloc_node(size, SHMLBA,
  1768  if (ret) {
  1769  area = find_vm_area(ret);
  1772  return ret;
  1867  void *ret;  (local)
  1869  ret
  2142  int ret;  (local)
  2219  struct vm_struct *ret;  (local)
  2604  int ret;  (local)
  [all...]
slub.c
  2345  void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);  (local)
  2347  trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
  2349  return ret;
  2356  void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);  (local)
  2357  trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
  2358  return ret;
  2364  void *ret = kmalloc_order(size, flags, order);  (local)
  2365  trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
  2366  return ret;
  2374  void *ret  (local)
  2388  void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);  (local)
  3314  void *ret;  (local)
  3350  void *ret;  (local)
  3573  int ret = 0;  (local)
  3610  int ret = 0;  (local)
  3995  void *ret;  (local)
  4018  void *ret;  (local)
  4897  int ret = -EINVAL;  (local)
  [all...]
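
Every slub.c wrapper above is "ret = allocate; emit a tracepoint with the caller (_RET_IP_) and ret; return ret". A rough user-space equivalent, using the GCC/Clang __builtin_return_address(0) builtin and a made-up traced_alloc() in place of the real tracepoints:

    #include <stdio.h>
    #include <stdlib.h>

    /* Made-up wrapper: allocate, log the result together with the caller's
     * return address, hand the pointer back unchanged. */
    static void *traced_alloc(size_t size)
    {
        void *ret = malloc(size);

        fprintf(stderr, "alloc %zu bytes -> %p (caller %p)\n",
                size, ret, __builtin_return_address(0));
        return ret;
    }

    int main(void)
    {
        void *p = traced_alloc(64);

        free(p);
        return 0;
    }
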
mempolicy.c
  163   static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,  (argument)
  168   nodes_onto(*ret, tmp, *rel);
  210   int ret;  (local)
  236   ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
  238   ret = mpol_ops[pol->mode].create(pol, NULL);
  239   return ret;
  724   int ret;  (local)
  731   ret = PTR_ERR(new);
  743   ret = mpol_set_nodemask(new, nodes, scratch);
  744   if (ret) {
  1774  bool ret = true;  (local)
  2178  int ret;  (local)
  2442  int ret;  (local)
  [all...]
vmscan.c
  261   unsigned long ret = 0;  (local)
  268   ret = 1;
  341   ret += nr_before - shrink_ret;
  364   return ret;
  1047  int ret = -EINVAL;  (local)
  1051  return ret;
  1062  return ret;
  1065  return ret;
  1073  return ret;
  1075  ret
  1313  int ret = -EBUSY;  (local)
  3036  int ret;  (local)
  3222  int ret = 0;  (local)
  3427  int ret;  (local)
  [all...]
page_alloc.c
  237   int ret = 0;  (local)
  244   ret = 1;
  246   ret = 1;
  249   return ret;
  1763  int ret;  (local)
  1792  ret = zone_reclaim(zone, gfp_mask, order);
  1793  switch (ret) {
  1832  bool ret = false;  (local)
  1835  ret = in_interrupt();
  1837  return ret;
  2635  bool ret = false;  (local)
  2882  int ret;  (local)
  2903  int ret;  (local)
  3841  int ret;  (local)
  4255  int ret;  (local)
  5160  int ret;  (local)
  5446  int ret = -EBUSY;  (local)
  [all...]
page_cgroup.c
  243  int ret = 0;  (local)
  246  ret = online_page_cgroup(mn->start_pfn,
  261  return notifier_from_errno(ret);
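
The hotplug callback keeps an ordinary negative errno in ret and converts it with notifier_from_errno() only at the return. The sketch below shows the idea with a simplified encoding; it is not the exact bit layout from <linux/notifier.h>:

    #include <errno.h>
    #include <stdio.h>

    /* Simplified notifier return codes; the real values live in
     * <linux/notifier.h> and differ from these demo constants. */
    #define DEMO_NOTIFY_OK        0x0001
    #define DEMO_NOTIFY_STOP_MASK 0x8000

    static int demo_notifier_from_errno(int err)
    {
        if (err)
            return DEMO_NOTIFY_STOP_MASK | (DEMO_NOTIFY_OK - err);
        return DEMO_NOTIFY_OK;
    }

    static int demo_callback(int should_fail)
    {
        int ret = 0;

        if (should_fail)
            ret = -ENOMEM;              /* ordinary errno-style failure */

        return demo_notifier_from_errno(ret);
    }

    int main(void)
    {
        printf("ok=0x%x  failed=0x%x\n", demo_callback(0), demo_callback(1));
        return 0;
    }
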
dmapool.c
  178  int ret;  (local)
  182  ret = device_create_file(dev, &dev_attr_pools);
  184  ret = 0;
  186  if (!ret)
shmem.c
  320   unsigned int ret;  (local)
  327   ret = 0;
  353   indices[ret] = indices[i];
  354   pages[ret] = page;
  355   ret++;
  357   if (unlikely(!ret && nr_found))
  360   return ret;
  1055  int ret = VM_FAULT_LOCKED;  (local)
  1057  error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
  1061  if (ret
  1230  unsigned long nr, ret;  (local)
  1539  int ret;  (local)
  1712  int ret = -ENODATA;  (local)
  [all...]
slab.c
  1403  int ret = 0;  (local)
  1416  ret = -EBUSY;
  1420  return ret;
  1427  int ret = 0;  (local)
  1437  ret = init_cache_nodelists_node(nid);
  1442  ret = drain_cache_nodelists_node(nid);
  1452  return notifier_from_errno(ret);
  2628  int ret = 0, i = 0;  (local)
  2641  ret += !list_empty(&l3->slabs_full) ||
  2644  return (ret
  2656  int ret;  (local)
  3715  void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));  (local)
  3728  void *ret;  (local)
  3742  void *ret = __cache_alloc_node(cachep, flags, nodeid,  (local)
  3759  void *ret;  (local)
  3815  void *ret;  (local)
  4578  int ret = -ENOMEM;  (local)
  [all...]
memblock.c
  483  int i, ret;  (local)
  485  ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
  486  if (ret)
  487  return ret;
  711  int i, ret;  (local)
  713  ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
  714  if (ret)
  715  return ret;
kmemleak.c
  1615  int ret;  (local)
  1625  ret = mutex_lock_interruptible(&scan_mutex);
  1626  if (ret < 0)
  1627  return ret;
  1642  ret = strict_strtoul(buf + 5, 0, &secs);
  1643  if (ret < 0)
  1655  ret = dump_str_object_info(buf + 5);
  1657  ret = -EINVAL;
  1661  if (ret < 0)
  1662  return ret;
  [all...]
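
kmemleak_write() takes the scan mutex interruptibly (propagating a negative error), then dispatches on the command string written to its debugfs file, with ret falling back to -EINVAL for anything unrecognised. A user-space approximation of the parsing half, with strtoul() in place of the old strict_strtoul():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int handle_command(const char *buf)
    {
        int ret = 0;

        if (strncmp(buf, "scan=", 5) == 0) {
            char *end;
            unsigned long secs = strtoul(buf + 5, &end, 0);

            if (end == buf + 5 || *end != '\0')
                return -EINVAL;         /* malformed number */
            printf("scan every %lu seconds\n", secs);
        } else if (strncmp(buf, "dump=", 5) == 0) {
            printf("dump object at %s\n", buf + 5);
        } else {
            ret = -EINVAL;              /* unknown command */
        }

        return ret;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               handle_command("scan=600"),
               handle_command("dump=0xdeadbeef"),
               handle_command("bogus"));
        return 0;
    }
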
vmstat.c
  27  static void sum_vm_events(unsigned long *ret)  (argument)
  32  memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
  38  ret[i] += this->event[i];
  47  void all_vm_events(unsigned long *ret)  (argument)
  50  sum_vm_events(ret);
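
sum_vm_events() zeroes the caller-supplied ret[] and folds each CPU's counters into it; all_vm_events() is the public wrapper. A small self-contained version with an invented per-"CPU" table:

    #include <stdio.h>
    #include <string.h>

    #define NR_EVENTS 4
    #define NR_CPUS   2

    /* Invented per-"CPU" counters standing in for the per-cpu vm event states. */
    static unsigned long per_cpu_events[NR_CPUS][NR_EVENTS] = {
        {  1,  2,  3,  4 },
        { 10, 20, 30, 40 },
    };

    static void sum_events(unsigned long *ret)
    {
        int cpu, i;

        memset(ret, 0, NR_EVENTS * sizeof(unsigned long));
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            for (i = 0; i < NR_EVENTS; i++)
                ret[i] += per_cpu_events[cpu][i];
    }

    int main(void)
    {
        unsigned long totals[NR_EVENTS];
        int i;

        sum_events(totals);
        for (i = 0; i < NR_EVENTS; i++)
            printf("event %d: %lu\n", i, totals[i]);
        return 0;
    }
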
mmap.c
  1357  * if (ret & ~PAGE_MASK)
  1358  * error = ret;
  2086  int ret;  (local)
  2092  ret = do_munmap(mm, addr, len);
  2094  return ret;
  2443  int ret;  (local)
  2461  ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
  2462  if (ret)
  2465  ret = insert_vm_struct(mm, vma);
  2466  if (ret)
  2650  int ret;  (local)
  [all...]
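
The do_munmap() call sites follow "take the lock, do the operation, stash the status in ret, unlock, return ret" so the result outlives the critical section. A pthread-based sketch with an invented region table:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
    static int regions[4] = { 1, 1, 0, 1 };       /* invented region table */

    static int remove_region(int idx)
    {
        if (idx < 0 || idx >= 4 || !regions[idx])
            return -EINVAL;
        regions[idx] = 0;
        return 0;
    }

    static int locked_remove(int idx)
    {
        int ret;

        pthread_mutex_lock(&map_lock);
        ret = remove_region(idx);                 /* status recorded under the lock */
        pthread_mutex_unlock(&map_lock);

        return ret;                               /* ...and returned after dropping it */
    }

    int main(void)
    {
        printf("%d %d\n", locked_remove(1), locked_remove(2));
        return 0;
    }
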

Completed in 129 milliseconds
