/mm/
kmemleak-test.c
      26  #include <linux/list.h>
      34  struct list_head list;    (member in struct:test_node)
      74   * Add elements to a list. They should only appear as orphan
      82  INIT_LIST_HEAD(&elem->list);
      83  list_add_tail(&elem->list, &test_list);
     101   * Remove the list elements without actually freeing the
     104  list_for_each_entry_safe(elem, tmp, &test_list, list)
     105  list_del(&elem->list);
    [all...]
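Taken together these hits show the test's leak-bait pattern: allocate nodes, chain them on a local list, then unlink them without kfree() so kmemleak ends up with unreferenced objects to report. A minimal kernel-module-style sketch of that pattern follows; the test_node layout and function names are illustrative, not the file's exact definitions. Note that list_for_each_entry_safe() is required because list_del() poisons the current node's links.

    #include <linux/list.h>
    #include <linux/slab.h>

    struct test_node {                      /* hypothetical layout */
            struct list_head list;
            long payload[25];
    };

    static LIST_HEAD(test_list);

    static void fill_list(void)
    {
            int i;

            for (i = 0; i < 10; i++) {
                    struct test_node *elem = kzalloc(sizeof(*elem), GFP_KERNEL);

                    if (!elem)
                            return;
                    INIT_LIST_HEAD(&elem->list);
                    list_add_tail(&elem->list, &test_list);
            }
    }

    static void unlink_list(void)
    {
            struct test_node *elem, *tmp;

            /* Unlink without freeing: the nodes become unreferenced,
             * which is exactly what kmemleak should then flag. */
            list_for_each_entry_safe(elem, tmp, &test_list, list)
                    list_del(&elem->list);
    }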
highmem.c
     312  struct list_head list;    (member in struct:page_address_map)
     326  spinlock_t lock;    /* Protect this bucket's list */
     355  list_for_each_entry(pam, &pas->lh, list) {
     388  struct page_address_map, list);
     389  list_del(&pam->list);
     396  list_add_tail(&pam->list, &pas->lh);
     400  list_for_each_entry(pam, &pas->lh, list) {
     402  list_del(&pam->list);
     405  list_add_tail(&pam->list, &page_address_pool);
     424  list_add(&page_address_maps[i].list,
    [all...]
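The structure behind these hits is a small hash table in which each bucket is a spinlock-protected list of page-to-virtual mappings for kmapped highmem pages. A simplified sketch of the lookup side, assuming the caller has already hashed to the right bucket (the hash step is elided):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct page_address_map {
            struct page *page;
            void *virtual;
            struct list_head list;
    };

    struct page_address_slot {
            struct list_head lh;            /* list of page_address_maps */
            spinlock_t lock;                /* protect this bucket's list */
    };

    /* Find the kmap virtual address of @page within its bucket. */
    static void *bucket_lookup(struct page_address_slot *pas, struct page *page)
    {
            struct page_address_map *pam;
            void *ret = NULL;
            unsigned long flags;

            spin_lock_irqsave(&pas->lock, flags);
            list_for_each_entry(pam, &pas->lh, list) {
                    if (pam->page == page) {
                            ret = pam->virtual;
                            break;
                    }
            }
            spin_unlock_irqrestore(&pas->lock, flags);
            return ret;
    }

Insertion and removal (lines 389-405) take the same lock, and retired entries are recycled onto page_address_pool rather than freed.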
slob.c
      15   * The slob heap is a set of linked lists of pages from alloc_pages(),
      16   * and within each page, there is a singly-linked list of free blocks
      24   * into the free list in address order, so this is effectively an
      68  #include <linux/list.h>
     107  struct list_head list;    /* linked list of free pages */    (member in struct:slob_page::__anon15::__anon16)
     157   * slob_page_free: true for pages on free_slob_pages list.
     164  static void set_slob_page_free(struct slob_page *sp, struct list_head *list)    (argument)
     166  list_add(&sp->list, list);
    [all...]
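The bookkeeping these hits describe pairs list membership with a "this page is free" marker, so slob_page_free() can answer without walking the list. A reduced sketch, with a plain flag word standing in for the page-flag bit the real code uses:

    #include <linux/list.h>
    #include <linux/types.h>

    #define SP_FREE 0x1UL                   /* hypothetical marker bit */

    struct slob_page_stub {                 /* reduced stand-in */
            unsigned long flags;
            struct list_head list;          /* linked list of free pages */
    };

    static LIST_HEAD(free_slob_pages);

    /* true for pages on the free_slob_pages list */
    static bool slob_page_free(struct slob_page_stub *sp)
    {
            return sp->flags & SP_FREE;
    }

    static void set_slob_page_free(struct slob_page_stub *sp,
                                   struct list_head *list)
    {
            list_add(&sp->list, list);
            sp->flags |= SP_FREE;
    }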
percpu.c
      59  #include <linux/list.h>
     101  struct list_head list;    /* linked to pcpu_slot lists */    (member in struct:pcpu_chunk)
     180  static struct list_head *pcpu_slot __read_mostly;    /* chunk list slots */
     335  list_move(&chunk->list, &pcpu_slot[nslot]);
     337  list_move_tail(&chunk->list, &pcpu_slot[nslot]);
     622  INIT_LIST_HEAD(&chunk->list);
     754  list_for_each_entry(chunk, &pcpu_slot[slot], list) {
     769   * restart cpu_slot list walking.
     884  list_for_each_entry_safe(chunk, next, head, list) {
     888  if (chunk == list_first_entry(head, struct pcpu_chunk, list))
    [all...]
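The list_move()/list_move_tail() pair at lines 335 and 337 is the chunk requeueing step: chunks are binned into pcpu_slot[] lists by free space, and the allocator scans a slot front to back. Reading the pair as "front if the chunk gained room, tail if it lost room" gives the sketch below; treating oslot < nslot as the gained-room signal is an inference from the hits:

    #include <linux/list.h>

    struct pcpu_chunk {                     /* reduced to the fields used here */
            struct list_head list;          /* linked to pcpu_slot lists */
            int free_size;
    };

    static void chunk_relocate(struct list_head *pcpu_slot,
                               struct pcpu_chunk *chunk, int oslot, int nslot)
    {
            if (oslot == nslot)
                    return;                 /* already on the right list */

            if (oslot < nslot)              /* gained space: scan it early */
                    list_move(&chunk->list, &pcpu_slot[nslot]);
            else                            /* lost space: deprioritize */
                    list_move_tail(&chunk->list, &pcpu_slot[nslot]);
    }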
shmem.c
      80  struct list_head list;    /* anchored by shmem_inode_info->xattr_list */    (member in struct:shmem_xattr)
     594  list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
     735   * Add inode to shmem_unuse()'s list of swapped-out inodes,
    1717  list_for_each_entry(xattr, &info->xattr_list, list) {
    1766  list_for_each_entry(xattr, &info->xattr_list, list) {
    1772  list_replace(&xattr->list, &new_xattr->list);
    1774  list_del(&xattr->list);
    1783  list_add(&new_xattr->list, &info->xattr_list);
    1899  list_for_each_entry(xattr, &info->xattr_list, list) {
    [all...]
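Lines 1766-1783 outline a replace-or-append xattr update: scan the inode's xattr_list for a matching name, splice the new entry into the old one's position with list_replace(), and otherwise append. A minimal sketch of that update; xattr_set is a hypothetical name, the caller is assumed to hold the lock protecting the list, and ownership of the name string is glossed over:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct shmem_xattr {
            struct list_head list;          /* anchored by the inode's xattr_list */
            char *name;
            size_t size;
            char value[0];
    };

    static void xattr_set(struct list_head *xattr_list,
                          struct shmem_xattr *new_xattr)
    {
            struct shmem_xattr *xattr;

            list_for_each_entry(xattr, xattr_list, list) {
                    if (!strcmp(xattr->name, new_xattr->name)) {
                            /* Splice the new node into the old one's links. */
                            list_replace(&xattr->list, &new_xattr->list);
                            kfree(xattr);
                            return;
                    }
            }
            list_add(&new_xattr->list, xattr_list);
    }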
vmalloc.c
      23  #include <linux/list.h>
     257  struct list_head list;    /* address sorted list */    (member in struct:vmap_area)
     258  struct list_head purge_list;    /* "lazy purge" list */
     316  /* address-sort this list so it is usable like the vmlist */
     321  list_add_rcu(&va->list, &prev->list);
     323  list_add_rcu(&va->list, &vmap_area_list);
     476  list_del_rcu(&va->list);
     602  list_for_each_entry_rcu(va, &vmap_area_list, list) {
    [all...]
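vmap_area carries two anchors: list threads every area into the address-sorted vmap_area_list, walked locklessly with list_for_each_entry_rcu() at line 602, while purge_list queues areas for deferred "lazy purge" freeing. A sketch of the RCU-safe sorted insert, assuming the address predecessor @prev was found elsewhere (via the rbtree the real code maintains):

    #include <linux/rculist.h>

    struct vmap_area {
            unsigned long va_start;
            unsigned long va_end;
            struct list_head list;          /* address-sorted list */
            struct list_head purge_list;    /* "lazy purge" queue */
    };

    static LIST_HEAD(vmap_area_list);

    /* Link @va after its predecessor, or at the head if it is lowest,
     * without disturbing concurrent RCU readers. */
    static void vmap_area_link(struct vmap_area *va, struct vmap_area *prev)
    {
            if (prev)
                    list_add_rcu(&va->list, &prev->list);
            else
                    list_add_rcu(&va->list, &vmap_area_list);
    }

Removal mirrors this with list_del_rcu() (line 476) followed by a grace period before the area is actually freed.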
vmscan.c
      57   * reclaim_mode determines how the inactive list is shrunk
     212  list_add_tail(&shrinker->list, &shrinker_list);
     223  list_del(&shrinker->list);
     272  list_for_each_entry(shrinker, &shrinker_list, list) {
     451  /* move page to the active list, page is locked */
     624   * putback_lru_page - put previously isolated page onto appropriate LRU list
     625   * @page: page to be put back to appropriate lru list
     627   * Add previously isolated @page to appropriate LRU list.
     647   * unevictable page on [in]active list.
     655   * list
    1638  move_active_pages_to_lru(struct zone *zone, struct list_head *list, struct list_head *pages_to_free, enum lru_list lru)    (argument)
    [all...]
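Lines 212, 223 and 272 are the shrinker registry: registration appends to a global list, unregistration deletes from it, and reclaim walks the list invoking each entry. The real list is guarded by shrinker_rwsem; a sketch of the writer side, with shrinker_stub standing in for struct shrinker:

    #include <linux/list.h>
    #include <linux/rwsem.h>

    struct shrinker_stub {                  /* stand-in for struct shrinker */
            struct list_head list;
    };

    static LIST_HEAD(shrinker_list);
    static DECLARE_RWSEM(shrinker_rwsem);

    static void stub_register(struct shrinker_stub *s)
    {
            down_write(&shrinker_rwsem);
            list_add_tail(&s->list, &shrinker_list);
            up_write(&shrinker_rwsem);
    }

    static void stub_unregister(struct shrinker_stub *s)
    {
            down_write(&shrinker_rwsem);
            list_del(&s->list);
            up_write(&shrinker_rwsem);
    }

Reclaim takes the semaphore for reading, so a shrinker cannot vanish mid-walk.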
slab.c
      85   * Each node has its own list of partial, free and full slabs.
     221   * Slabs are chained into three lists: fully used, partial, fully free slabs.
     226  struct list_head list;    (member in struct:slab::__anon12::__anon13)
     242   * - reduce the number of linked list operations
     276  struct list_head slabs_partial;    /* partial list first, better asm code */
    1261   * alloc_arraycache's are going to use this list.
    1459  static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,    (argument)
    1467  memcpy(ptr, list, sizeof(struct kmem_list3));
    2498  /* cache setup completed, link it into the list */
    2585   * Remove slabs from the list o
    [all...]
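Per the comments at lines 85 and 221, each node chains its slabs on three lists keyed by occupancy, and a slab whose inuse count changed gets requeued onto the matching list. A sketch of that requeue with reduced stand-in types; @num plays the role of cachep->num, the object capacity of a slab:

    #include <linux/list.h>

    struct slab_stub {                      /* stand-in for struct slab */
            struct list_head list;
            unsigned int inuse;             /* objects currently allocated */
    };

    struct node_lists {                     /* stand-in for kmem_list3 */
            struct list_head slabs_partial;
            struct list_head slabs_full;
            struct list_head slabs_free;
    };

    static void requeue_slab(struct node_lists *l3, struct slab_stub *slabp,
                             unsigned int num)
    {
            list_del(&slabp->list);
            if (slabp->inuse == num)
                    list_add(&slabp->list, &l3->slabs_full);
            else if (slabp->inuse)
                    list_add(&slabp->list, &l3->slabs_partial);
            else
                    list_add(&slabp->list, &l3->slabs_free);
    }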
memcontrol.c
     209  struct list_head list;    (member in struct:mem_cgroup_eventfd_list)
     238   * Per cgroup active and inactive list, similar to the
     987   * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
     991   * Returns the lru list vector holding pages for the given @zone and
    1502   * list or the wrong list here, we can start from some node and traverse all
    1503   * nodes based on the zonelist. So update the list loosely once per 10 secs.
    3597   * This routine traverses the page_cgroups in the given list and drops them all.
    3605  struct list_head *list;    (local)
    3612  list
    [all...]
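mem_cgroup_eventfd_list (line 209) threads eventfd contexts onto a per-memcg notification list; firing an event then means walking that list and signalling each fd. A minimal sketch of the walk, assuming the caller serializes against concurrent unregistration:

    #include <linux/list.h>
    #include <linux/eventfd.h>

    struct mem_cgroup_eventfd_list {
            struct list_head list;
            struct eventfd_ctx *eventfd;
    };

    /* Wake every eventfd registered on @head. */
    static void notify_all(struct list_head *head)
    {
            struct mem_cgroup_eventfd_list *ev;

            list_for_each_entry(ev, head, list)
                    eventfd_signal(ev->eventfd, 1);
    }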
page_alloc.c
       4   * Manages the free list; the system allocates free pages here.
     513   * At each level, we keep a list of pages, which are heads of continuous
     574   * that is happening, add the free page to the tail of the list
     624   * Assumes all pages on list are in same zone, and of same order.
     646  struct list_head *list;    (local)
     651   * empty list is encountered. This is so more pages are freed
     659  list = &pcp->lists[migratetype];
     660  } while (list_empty(list));
     662  /* This is the only non-empty list. Free them all. */
     667  page = list_entry(list
    1070  rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, int cold)    (argument)
    1263  free_hot_cold_page_list(struct list_head *list, int cold)    (argument)
    1363  struct list_head *list;    (local)
    [all...]
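Lines 651-667 show how the per-cpu page lists drain back to the buddy allocator: round-robin across the migratetype lists, skipping empty ones so more pages come off the fuller lists. A reduced sketch of that loop; the actual freeing (__free_one_page() in the real code) is replaced by a comment, and it assumes @to_free never exceeds the pages actually queued, as the caller's accounting guarantees:

    #include <linux/list.h>

    #define MIGRATE_PCPTYPES 3

    struct page_stub {                      /* stand-in for struct page */
            struct list_head lru;
    };

    struct pcp_stub {                       /* stand-in for the per-cpu pages */
            struct list_head lists[MIGRATE_PCPTYPES];
    };

    static void drain_stub(struct pcp_stub *pcp, int to_free)
    {
            int migratetype = 0;

            while (to_free) {
                    struct list_head *list;
                    struct page_stub *page;

                    /* Advance to the next non-empty list. */
                    do {
                            migratetype = (migratetype + 1) % MIGRATE_PCPTYPES;
                            list = &pcp->lists[migratetype];
                    } while (list_empty(list));

                    page = list_entry(list->prev, struct page_stub, lru);
                    list_del(&page->lru);
                    /* __free_one_page(page, ...) would happen here */
                    to_free--;
            }
    }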