/mm/ (uses of the local variable "tmp")

balloon_compaction.c
     59: struct page *page, *tmp;    (local)
     64: list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
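
Several hits in this listing (balloon_compaction.c here, kmemleak-test.c, zsmalloc.c, hugetlb.c and memcontrol.c below, plus the hlist flavor in kmemleak.c) use tmp the same way: as the lookahead cursor of list_for_each_entry_safe(), the list-walk macro that tolerates deleting the current entry. A minimal sketch of the idiom; struct item and drain_all() are made up for illustration:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
        struct list_head lru;
        int payload;
    };

    /*
     * Free every item on 'head'. The second cursor ('tmp') caches the
     * next node before the loop body runs, so list_del() on the current
     * node cannot break the traversal.
     */
    static void drain_all(struct list_head *head)
    {
        struct item *item, *tmp;

        list_for_each_entry_safe(item, tmp, head, lru) {
            list_del(&item->lru);
            kfree(item);
        }
    }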

kmemleak-test.c
    100: struct test_node *elem, *tmp;    (local)
    106: list_for_each_entry_safe(elem, tmp, &test_list, list)

mincore.c
    273: unsigned char *tmp;    (local)
    290: tmp = (void *) __get_free_page(GFP_USER);
    291: if (!tmp)
    301: retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
    306: if (copy_to_user(vec, tmp, retval)) {
    315: free_page((unsigned long) tmp);
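
In the mincore.c hit, tmp is a one-page bounce buffer: the syscall fills it one chunk at a time in kernel space and copies each chunk out to the user vector before freeing the page. A sketch of that shape, with a hypothetical fill_chunk() standing in for do_mincore():

    #include <linux/mm.h>
    #include <linux/uaccess.h>

    /* Hypothetical stand-in for do_mincore(): fill up to 'n' status
     * bytes into 'buf', returning how many were produced. */
    long fill_chunk(unsigned char *buf, unsigned long n);

    static long copy_out_in_chunks(unsigned char __user *vec,
                                   unsigned long pages)
    {
        unsigned char *tmp;
        long done = 0;

        tmp = (void *) __get_free_page(GFP_USER);
        if (!tmp)
            return -EAGAIN;

        while (pages) {
            long n = fill_chunk(tmp, min(pages, (unsigned long)PAGE_SIZE));

            if (n <= 0)
                break;
            if (copy_to_user(vec + done, tmp, n)) {
                done = -EFAULT;
                break;
            }
            done += n;
            pages -= n;
        }
        free_page((unsigned long) tmp);
        return done;
    }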

madvise.c
    465: unsigned long end, tmp;    (local)
    527: tmp = vma->vm_end;
    528: if (end < tmp)
    529: tmp = end;
    531: /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
    532: error = madvise_vma(vma, &prev, start, tmp, behavior);
    535: start = tmp;
    (the same clamp-and-advance shape recurs in mprotect.c and mlock.c below; see the sketch after the mlock.c entry)

mprotect.c
    338: unsigned long vm_flags, nstart, end, tmp, reqprot;    (local)
    411: tmp = vma->vm_end;
    412: if (tmp > end)
    413: tmp = end;
    414: error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
    417: nstart = tmp;

mlock.c
    615: unsigned long nstart, end, tmp;    (local)
    643: tmp = vma->vm_end;
    644: if (tmp > end)
    645: tmp = end;
    646: error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
    649: nstart = tmp;
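
The madvise.c, mprotect.c and mlock.c hits above all share one loop shape: walk the VMAs covering [start, end), clamping each step at tmp = min(vma->vm_end, end) so the per-VMA helper never sees an address outside the caller's range, then resume at tmp. A sketch of that shared skeleton; vma_next() and range_fixup() are hypothetical stand-ins for the per-caller pieces:

    #include <linux/mm.h>

    /* Hypothetical helpers standing in for the vma-list traversal and
     * for madvise_vma()/mprotect_fixup()/mlock_fixup(). */
    struct vm_area_struct *vma_next(struct vm_area_struct *vma);
    int range_fixup(struct vm_area_struct *vma, unsigned long start,
                    unsigned long end);

    static int apply_to_range(struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
    {
        unsigned long tmp;
        int error;

        for (; vma && vma->vm_start < end; vma = vma_next(vma)) {
            tmp = vma->vm_end;
            if (tmp > end)
                tmp = end;              /* clamp to the requested range */

            error = range_fixup(vma, start, tmp);
            if (error)
                return error;

            start = tmp;                /* resume where this step ended */
        }
        return 0;
    }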

slab_common.c
     82: char tmp;    (local)
     90: res = probe_kernel_address(s->name, tmp);
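
Here tmp is only a landing pad: probe_kernel_address() copies a single byte from a possibly stale pointer without risking an oops, and only its return value matters. A sketch assuming the old probe_kernel_address() API shown above (later kernels spell it get_kernel_nofault()):

    #include <linux/uaccess.h>
    #include <linux/types.h>

    /*
     * Report whether 'name' points at readable kernel memory. One byte
     * is enough; 'tmp' exists only to give the probe a destination.
     * probe_kernel_address() returns 0 on success, -EFAULT on a fault.
     */
    static bool name_looks_readable(const char *name)
    {
        char tmp;

        return probe_kernel_address(name, tmp) == 0;
    }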

zsmalloc.c
    598: struct page *nextp, *tmp, *head_extra;    (local)
    612: list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {

kmemleak.c
    440: struct hlist_node *tmp;    (local)
    449: hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
   1258: struct kmemleak_object *object, *tmp;    (local)
   1273: tmp = list_entry(object->gray_list.next, typeof(*object),
   1280: object = tmp;

memory.c
   2106: int tmp;    (local)
   2109: tmp = do_page_mkwrite(vma, old_page, address);
   2110: if (unlikely(!tmp || (tmp &
   2113: return tmp;
   2980: int ret, tmp;    (local)
   2992: tmp = do_page_mkwrite(vma, fault_page, address);
   2993: if (unlikely(!tmp ||
   2994: (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
   2996: return tmp;
   [all...]
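
In both memory.c hits, tmp holds the VM_FAULT_* bitmask returned by do_page_mkwrite(), and the caller bails out on a zero result as well as on any error/nopage bit, handing the raw code back to the fault path. A sketch of the check; notify_mkwrite() is a hypothetical stand-in for do_page_mkwrite():

    #include <linux/mm.h>

    /* Hypothetical stand-in for do_page_mkwrite(): returns a
     * VM_FAULT_* bitmask from the filesystem's ->page_mkwrite(). */
    int notify_mkwrite(struct vm_area_struct *vma, struct page *page,
                       unsigned long address);

    static int write_notify(struct vm_area_struct *vma, struct page *page,
                            unsigned long address)
    {
        int tmp;

        tmp = notify_mkwrite(vma, page, address);
        /* Zero and error/nopage results both abort; hand back the raw
         * fault code for the caller to act on. */
        if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))))
            return tmp;

        return 0;   /* mkwrite succeeded; continue the write fault */
    }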

mempolicy.c
    179: nodemask_t tmp;    (local)
    180: nodes_fold(tmp, *orig, nodes_weight(*rel));
    181: nodes_onto(*ret, tmp, *rel);
    323: nodemask_t tmp;    (local)
    326: nodes_and(tmp, pol->w.user_nodemask, *nodes);
    328: mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
    335: nodes_remap(tmp, pol->v.nodes,
    337: pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
    339: tmp = pol->w.cpuset_mems_allowed;
    345: if (nodes_empty(tmp))
    368: nodemask_t tmp;    (local)
   1042: nodemask_t tmp;    (local)
   [all...]
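
The mempolicy rebind code computes each candidate mask into a scratch nodemask_t and commits it only if something survives. A small sketch of that step using the real nodes_and()/nodes_empty() helpers; rebind_preferred() and its parameters are made up:

    #include <linux/nodemask.h>

    /*
     * Intersect the user's requested nodes with the newly allowed set in
     * a scratch mask, falling back to the allowed set when nothing
     * survives, so the policy never ends up with an empty nodemask.
     */
    static void rebind_preferred(const nodemask_t *user,
                                 const nodemask_t *allowed,
                                 nodemask_t *result)
    {
        nodemask_t tmp;

        nodes_and(tmp, *user, *allowed);
        if (nodes_empty(tmp))
            tmp = *allowed;
        *result = tmp;
    }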

mmap.c
   2057: struct vm_area_struct *tmp;    (local)
   2059: tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
   2061: if (tmp->vm_end > addr) {
   2062: vma = tmp;
   2063: if (tmp->vm_start <= addr)
   2563: * places tmp vma above, and higher split_vma places tmp vma below.
   2595: struct vm_area_struct *tmp = vma;    (local)
   2596: while (tmp && tmp
   3295: unsigned long tmp, free_kbytes;    (local)
   [all...]
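
The first mmap.c hit is the find_vma() descent: tmp names each rb-tree node on the way down while the walk remembers the lowest VMA ending above addr; the vmalloc.c hits below do the same over vmap areas. A self-contained sketch with a made-up range_node payload:

    #include <linux/rbtree.h>

    /* Made-up payload carrying a [start, end) range in an rbtree. */
    struct range_node {
        struct rb_node rb;
        unsigned long start, end;
    };

    /*
     * Find the lowest node whose end lies above 'addr', keeping the best
     * candidate seen on the way down, just as find_vma() does.
     */
    static struct range_node *find_covering(struct rb_root *root,
                                            unsigned long addr)
    {
        struct rb_node *rb_node = root->rb_node;
        struct range_node *found = NULL;

        while (rb_node) {
            struct range_node *tmp;

            tmp = rb_entry(rb_node, struct range_node, rb);
            if (tmp->end > addr) {
                found = tmp;
                if (tmp->start <= addr)
                    break;          /* addr falls inside this range */
                rb_node = rb_node->rb_left;
            } else {
                rb_node = rb_node->rb_right;
            }
        }
        return found;
    }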

swapfile.c
    442: unsigned long tmp;    (local)
    469: tmp = cluster->next;
    470: while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
    472: if (!si->swap_map[tmp]) {
    476: tmp++;
    482: cluster->next = tmp + 1;
    483: *offset = tmp;
    484: *scan_base = tmp;
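
Here tmp is a scan cursor: it starts at the cluster's next-free hint, advances until it finds a zero entry in the swap map or runs off the cluster, then publishes the slot and parks the hint just past it. A plain-C sketch; map, max, cluster_end and next are hypothetical stand-ins for the swap_info fields used above:

    /*
     * Advance 'tmp' through the current cluster until a free slot
     * (map value 0) turns up; on success, publish it via *offset and
     * remember where the next scan should start.
     */
    static int find_free_slot(const unsigned char *map, unsigned long max,
                              unsigned long cluster_end,
                              unsigned long *next, unsigned long *offset)
    {
        unsigned long tmp = *next;
        int found = 0;

        while (tmp < max && tmp < cluster_end) {
            if (!map[tmp]) {
                found = 1;
                break;
            }
            tmp++;
        }
        if (found) {
            *next = tmp + 1;    /* resume after the slot just handed out */
            *offset = tmp;
        }
        return found;
    }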

vmalloc.c
    310: struct rb_node *tmp;    (local)
    329: tmp = rb_prev(&va->rb_node);
    330: if (tmp) {
    332: prev = rb_entry(tmp, struct vmap_area, rb_node);
    411: struct vmap_area *tmp;    (local)
    412: tmp = rb_entry(n, struct vmap_area, rb_node);
    413: if (tmp->va_end >= addr) {
    414: first = tmp;
    415: if (tmp->va_start <= addr)
    850: struct vmap_block *tmp;    (local)
   1138: struct vm_struct *tmp, **p;    (local)
   1180: struct vm_struct *tmp;    (local)
   [all...]

hugetlb.c
   1207: struct page *page, *tmp;    (local)
   1264: list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
   1279: list_for_each_entry_safe(page, tmp, &surplus_list, lru)
   2262: unsigned long tmp = h->max_huge_pages;    (local)
   2268: table->data = &tmp;
   2276: NUMA_NO_NODE, tmp, *length);
   2303: unsigned long tmp;    (local)
   2309: tmp = h->nr_overcommit_huge_pages;
   2314: table->data = &tmp;
   2322: h->nr_overcommit_huge_pages = tmp;
   3699: struct page *tmp;    (local)
   [all...]
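
The two sysctl hits show a parse-into-a-copy pattern: the handler points table->data at a stack tmp, lets the generic proc parser fill it, and only then writes the live counter, so a failed parse never clobbers kernel state. A sketch of the same idea that copies the ctl_table instead of patching the caller's (a small deviation from the hugetlb code); the 'live' parameter stands in for h->nr_overcommit_huge_pages:

    #include <linux/sysctl.h>

    static int tuned_ulong_handler(struct ctl_table *table, int write,
                                   void __user *buffer, size_t *length,
                                   loff_t *ppos, unsigned long *live)
    {
        struct ctl_table dup = *table;  /* shallow copy we may scribble on */
        unsigned long tmp = *live;
        int ret;

        dup.data = &tmp;
        dup.maxlen = sizeof(tmp);
        ret = proc_doulongvec_minmax(&dup, write, buffer, length, ppos);
        if (ret)
            return ret;

        if (write)
            *live = tmp;                /* commit only after a clean parse */
        return 0;
    }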

slab.c
   4004: char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;    (local)
   4014: tmp = strchr(kbuf, ' ');
   4015: if (!tmp)
   4017: *tmp = '\0';
   4018: tmp++;
   4019: if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
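
This is the /proc/slabinfo write parser: tmp splits a "name limit batch shared" line at the first space, terminates the name in place, and scans the numeric tail. The same logic runs unchanged in userspace:

    #include <stdio.h>
    #include <string.h>

    /* Split "name limit batch shared" at the first space, terminating
     * the name in place, then scan the three numeric tunables. */
    static int parse_tunables(char *kbuf, int *limit, int *batchcount,
                              int *shared)
    {
        char *tmp;

        tmp = strchr(kbuf, ' ');
        if (!tmp)
            return -1;          /* no arguments after the cache name */
        *tmp = '\0';            /* kbuf now holds just the name */
        tmp++;
        if (sscanf(tmp, " %d %d %d", limit, batchcount, shared) != 3)
            return -1;
        return 0;
    }

    int main(void)
    {
        char line[] = "dentry 120 60 8";
        int limit, batch, shared;

        if (!parse_tunables(line, &limit, &batch, &shared))
            printf("%s -> %d %d %d\n", line, limit, batch, shared);
        return 0;
    }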

memcontrol.c
   3049: struct memcg_cache_params *params, *tmp;    (local)
   3055: list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
   4242: unsigned long long min_limit, min_memsw_limit, tmp;    (local)
   4253: tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
   4254: min_limit = min(min_limit, tmp);
   4255: tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
   4256: min_memsw_limit = min(min_memsw_limit, tmp);
   4802: struct mem_cgroup_eventfd_list *ev, *tmp;    (local)
   4806: list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
   5270: int zone, tmp;    (local)
   5376: int tmp, node, zone;    (local)
   5511: struct mem_cgroup_event *event, *tmp;    (local)
   [all...]

page_alloc.c
   3176: char tmp[MIGRATE_TYPES + 1];    (local)
   3177: char *p = tmp;
   3186: printk("(%s) ", tmp);
   3521: const struct cpumask *tmp = cpumask_of_node(0);    (local)
   3542: tmp = cpumask_of_node(n);
   3543: if (!cpumask_empty(tmp))
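
The first page_alloc.c hit builds a short migrate-type letter string in a stack buffer, with p walking forward as letters are appended and tmp printed once at the end. A runnable userspace re-creation; the letters and the present[] flags are illustrative, not the kernel's real migrate types:

    #include <stdio.h>

    int main(void)
    {
        const char letters[] = "UMRC";   /* one letter per made-up type */
        int present[4] = { 1, 0, 1, 1 };
        char tmp[sizeof(letters)];       /* worst case: all letters + NUL */
        char *p = tmp;
        int i;

        for (i = 0; i < 4; i++)
            if (present[i])
                *p++ = letters[i];       /* append and advance the cursor */
        *p = '\0';

        printf("(%s)\n", tmp);           /* prints "(URC)" */
        return 0;
    }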

slub.c
    350: struct page tmp;    (local)
    351: tmp.counters = counters_new;
    358: page->frozen = tmp.frozen;
    359: page->inuse = tmp.inuse;
    360: page->objects = tmp.objects;
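
slub stores a freshly computed counters word into a throwaway struct page so the compiler, rather than hand-written shifts, splits it back into the frozen/inuse/objects bitfields. A userspace sketch of the trick; the widths and their order here are illustrative (bitfield layout is implementation-defined), not slub's real struct page:

    #include <stdio.h>

    /* Simplified stand-in for the packed fields of struct page. */
    struct fake_page {
        union {
            unsigned int counters;       /* the whole word at once */
            struct {
                unsigned int inuse:16;
                unsigned int objects:15;
                unsigned int frozen:1;
            };
        };
    };

    int main(void)
    {
        struct fake_page tmp, page;

        tmp.counters = 0x80200007u;      /* store one packed word... */

        page.frozen = tmp.frozen;        /* ...and read the fields back */
        page.inuse = tmp.inuse;
        page.objects = tmp.objects;

        printf("inuse=%u objects=%u frozen=%u\n",
               page.inuse, page.objects, page.frozen);
        return 0;
    }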