/mm/: cross-reference matches for the identifier "node"
kmemcheck.c
    7     void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)    [argument]
    19    shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
    115   /* XXX: Can use zone->node for node? */
prio_tree.c
    38    * heap_index] value, we have a corresponding priority search tree node. If
    40    * them is used as a tree node and others are stored in a vm_set list. The tree
    41    * node points to the first vma (head) of the list using vm_set.head.
    51    * We need some way to identify whether a vma is a tree node, head of a vm_set
    55    * removed under R->mmap_sem, H replaces R as a tree node. Since we do not hold
    56    * H->mmap_sem, we cannot use H->vm_flags for marking that H is a tree node now.
    60    * vma radix priority search tree node rules:
    62    * vma->shared.vm_set.parent != NULL ==> a tree node
    68    * vma->shared.vm_set.head == NULL ==> a list node
    117   struct vm_area_struct *node, *head    [local]
    [all...]
quicklist.c
    29    int node = numa_node_id();    [local]
    30    struct zone *zones = NODE_DATA(node)->node_zones;
    44    num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
sparse-vmemmap.c
    38    static void * __init_refok __earlyonly_bootmem_alloc(int node,    [argument]
    43    return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
    49    void * __meminit vmemmap_alloc_block(unsigned long size, int node)    [argument]
    55    if (node_state(node, N_HIGH_MEMORY))
    56    page = alloc_pages_node(node,
    65    return __earlyonly_bootmem_alloc(node, size, size,
    70    void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)    [argument]
    75    return vmemmap_alloc_block(size, node);
    80    return vmemmap_alloc_block(size, node);
    87    void __meminit vmemmap_verify(pte_t *pte, int node,    [argument]
    98    vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)    [argument]
    112   vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)    [argument]
    124   vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)    [argument]
    136   vmemmap_pgd_populate(unsigned long addr, int node)    [argument]
    148   vmemmap_populate_basepages(struct page *start_page, unsigned long size, int node)    [argument]
    [all...]
slob.c
    47    * logic down to the page allocator, and simply doing the node accounting
    48    * on the upper levels. In the event that a node id is explicitly
    49    * provided, alloc_pages_exact_node() with the specified node id is used
    50    * instead. The common case (or when the node id isn't explicitly provided)
    51    * will default to the current node, as per numa_node_id().
    54    * these are scanned for by matching against the node id encoded in the
    56    * the freelist will only be done so on pages residing on the same node,
    57    * in order to prevent random node placement.
    243   static void *slob_new_pages(gfp_t gfp, int order, int node)    [argument]
    248   if (node != -1)
    321   slob_alloc(size_t size, gfp_t gfp, int align, int node)    [argument]
    479   __kmalloc_node(size_t size, gfp_t gfp, int node)    [argument]
    609   kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)    [argument]
    [all...]
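
The policy spelled out in the slob.c comment above (an explicitly provided node id wins, otherwise fall back to the current node) can be imitated from userspace; a minimal sketch using libnuma, where NO_NODE and alloc_on_node() are names invented for the illustration:

#include <numa.h>    /* libnuma: link with -lnuma */
#include <stddef.h>

#define NO_NODE (-1)    /* stand-in for "no node explicitly provided" */

/* Mirror of the policy quoted above: an explicit node id wins,
 * otherwise allocate on whatever node the caller is running on. */
static void *alloc_on_node(size_t size, int node)
{
	if (node != NO_NODE)
		return numa_alloc_onnode(size, node);  /* pages from that node */
	return numa_alloc_local(size);                 /* default: current node */
}

Memory from either call is released with numa_free(ptr, size).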
compaction.c
    677   /* Compact all zones within a node */
    759   int compaction_register_node(struct node *node)    [argument]
    761   return device_create_file(&node->dev, &dev_attr_compact);
    764   void compaction_unregister_node(struct node *node)    [argument]
    766   return device_remove_file(&node->dev, &dev_attr_compact);
memory_hotplug.c
    164   int node = pgdat->node_id;    [local]
    172   get_page_bootmem(node, page, NODE_INFO);
    183   get_page_bootmem(node, page, NODE_INFO);
    556   /* init node's zones as empty zones, we don't have any present pages.*/
    560   * The node we allocated has no zone fallback lists. For avoiding
    579   * called by cpu_up() to online a node without onlined memory.
    630   /* we online node here. we can't roll back from here. */
    636   * If sysfs file of new node can't create, cpu on the node
    872   int ret, drain, retry_max, node;    [local]
    [all...]
migrate.c
    1069  int node;    [member in struct:page_to_node]
    1078  while (pm->node != MAX_NUMNODES && pm->page != p)
    1081  if (pm->node == MAX_NUMNODES)
    1086  return alloc_pages_exact_node(pm->node,
    1093  * and the node number must contain a valid target node.
    1094  * The pm array ends with node = MAX_NUMNODES.
    1109  for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
    1135  if (err == pp->node)
    1217  int node;    [local]
    [all...]
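
The pm array convention quoted above is a plain sentinel-terminated scan; a self-contained model, with page_to_node reduced to two fields and MAX_NUMNODES pinned to an arbitrary value:

#include <stdio.h>

#define MAX_NUMNODES 64    /* illustrative; the real value is config-dependent */

/* Model of the convention above: one page/target-node pair per entry,
 * with an entry whose node == MAX_NUMNODES closing the array. */
struct page_to_node {
	void *page;
	int   node;
};

static void walk(const struct page_to_node *pm)
{
	for (const struct page_to_node *pp = pm; pp->node != MAX_NUMNODES; pp++)
		printf("move page %p to node %d\n", pp->page, pp->node);
}

int main(void)
{
	int a, b;
	struct page_to_node pm[] = {
		{ &a, 0 },
		{ &b, 1 },
		{ NULL, MAX_NUMNODES },    /* sentinel: end of the list */
	};

	walk(pm);
	return 0;
}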
vmstat.c
    427   * node local memory. The per cpu pagesets on remote zones are placed
    434   * with the global counters. These could cause remote node cache line
    507   * When __GFP_OTHER_NODE is set assume the node of the preferred
    508   * zone is the local node. This is useful for daemons who allocate
    519   if (z->node == ((flags & __GFP_OTHER_NODE) ?
    520   preferred_zone->node : numa_node_id()))
    622   loff_t node = *pos;    [local]
    624   pgdat && node;
    626   --node;
    643   /* Walk all the zones in a node and print using a callback */
    [all...]
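
The check at lines 519-520 decides which node an allocation was meant for before counting it as a NUMA hit or miss; a standalone model of that decision (struct numa_stats and account_alloc() are invented for the sketch):

#include <stdbool.h>

struct numa_stats { unsigned long hit, miss; };

/* Model of the accounting above: the allocation counts as a hit when
 * the zone's node matches the intended node, which is the local node
 * unless the __GFP_OTHER_NODE-style flag says to treat the preferred
 * zone's node as local. */
static void account_alloc(struct numa_stats *st, int zone_node,
			  int local_node, int preferred_node, bool other_node)
{
	int wanted = other_node ? preferred_node : local_node;

	if (zone_node == wanted)
		st->hit++;
	else
		st->miss++;
}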
huge_memory.c
    1583  struct hlist_node *node;    [local]
    1587  hlist_for_each_entry(mm_slot, node, bucket, hash) {
    1838  int node)
    1868  node, __GFP_OTHER_NODE);
    2023  int node = -1;    [local]
    2055  * Chose the node of the first page. This could
    2059  if (node == -1)
    2060  node = page_to_nid(page);
    2077  collapse_huge_page(mm, address, hpage, vma, node);
    1834  collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, int node)    [argument]
kmemleak.c
    123   struct hlist_node node;    [member in struct:kmemleak_scan_area]
    402   struct prio_tree_node *node;    [local]
    407   node = prio_tree_next(&iter);
    408   if (node) {
    409   object = prio_tree_entry(node, struct kmemleak_object,
    448   hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
    520   struct prio_tree_node *node;    [local]
    571   node = prio_tree_insert(&object_tree_root, &object->tree_node);
    578   if (node != &object->tree_node) {
    760   INIT_HLIST_NODE(&area->node);
    [all...]
ksm.c
    103   * @seqnr: count of completed full scans (needed when removing unstable node)
    115   * struct stable_node - node of the stable rbtree
    116   * @node: rb node of this ksm page in the stable tree
    121   struct rb_node node;    [member in struct:stable_node]
    133   * @node: rb node of this rmap_item in the unstable tree
    144   struct rb_node node; /* when node of unstable tree */    [member in union:rmap_item::__anon5]
    153   #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
    281   struct hlist_node *node;    [local]
    989   struct rb_node *node = root_stable_tree.rb_node;    [local]
    1777  struct rb_node *node;    [local]
    [all...]
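
The stable tree above uses the usual kernel pattern of an rb_node embedded in a payload struct; a self-contained sketch of a lookup in that style, with a simplified two-pointer rb_node and an invented compare key standing in for whatever the stable tree actually compares:

#include <stddef.h>

/* Two-pointer stand-in for the kernel's struct rb_node. */
struct rb_node {
	struct rb_node *rb_left, *rb_right;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct stable_node {
	struct rb_node node;    /* rb node of this page in the stable tree */
	unsigned long key;      /* invented compare key for the sketch */
};

static struct stable_node *stable_tree_search(struct rb_node *root,
					      unsigned long key)
{
	struct rb_node *n = root;    /* cf. root_stable_tree.rb_node at line 989 */

	while (n) {
		/* recover the payload that embeds this rb_node */
		struct stable_node *s = container_of(n, struct stable_node, node);

		if (key < s->key)
			n = n->rb_left;
		else if (key > s->key)
			n = n->rb_right;
		else
			return s;
	}
	return NULL;
}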
mempolicy.c
    8     * NUMA policy allows the user to give hints in which node(s) memory should
    24    * FIXME: memory is allocated starting with the first node
    28    * preferred    Try a specific node first before normal fallback.
    29    *              As a special case node -1 here means do the allocation
    34    * default      Allocate on the local node first, or when on a VMA
    126   * disallowed nodes. In this way, we can avoid finding no node to alloc
    354   int node = first_node(pol->w.user_nodemask);    [local]
    356   if (node_isset(node, *nodes)) {
    357   pol->v.preferred_node = node;
    378   * disallowed nodes. In this way, we can avoid finding no node to alloc
    918   new_node_page(struct page *page, unsigned long node, int **x)    [argument]
    1843  alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, int node)    [argument]
    [all...]
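
The "preferred" and "default" policies described in the header comment are also reachable from userspace through libnuma; a minimal sketch (choosing node 0 is arbitrary):

#include <numa.h>    /* libnuma: link with -lnuma */
#include <stdio.h>

int main(void)
{
	if (numa_available() < 0) {
		fprintf(stderr, "NUMA is not available on this system\n");
		return 1;
	}

	numa_set_preferred(0);    /* "preferred": try node 0, then fall back */
	/* ... allocations made here favor node 0 ... */

	numa_set_localalloc();    /* "default": allocate on the local node */
	return 0;
}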
nommu.c
    318   * vmalloc_node - allocate memory on a specific node
    320   * @node: numa node
    328   void *vmalloc_node(unsigned long size, int node)    [argument]
    335   * vzalloc_node - allocate memory on a specific node with zero fill
    337   * @node: numa node
    346   void *vzalloc_node(unsigned long size, int node)    [argument]
    419   void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)    [argument]
page-writeback.c
    177   int node;    [local]
    180   for_each_node_state(node, N_HIGH_MEMORY) {
    182   &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
vmalloc.c
    335   int node, gfp_t gfp_mask)
    348   gfp_mask & GFP_RECLAIM_MASK, node);
    785   int node, err;    [local]
    787   node = numa_node_id();
    790   gfp_mask & GFP_RECLAIM_MASK, node);
    796   node, gfp_mask);
    1086  * @node: prefer to allocate data structures on this node
    1091  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)    [argument]
    1105  VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
    332   alloc_vmap_area(unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend, int node, gfp_t gfp_mask)    [argument]
    1314  __get_vm_area_node(unsigned long size, unsigned long align, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, void *caller)    [argument]
    1571  __vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot, int node, void *caller)    [argument]
    1643  __vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, int node, void *caller)    [argument]
    1699  __vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, pgprot_t prot, int node, void *caller)    [argument]
    1714  __vmalloc_node_flags(unsigned long size, int node, gfp_t flags)    [argument]
    1787  vmalloc_node(unsigned long size, int node)    [argument]
    1806  vzalloc_node(unsigned long size, int node)    [argument]
    [all...]
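
A usage sketch for the node-aware entry points listed above, in kernel context (the wrapper name is invented; only vzalloc_node() and vfree() are real API):

#include <linux/errno.h>
#include <linux/vmalloc.h>

/* Allocate, use and release a zeroed, virtually contiguous buffer whose
 * backing pages preferably come from NUMA node nid. */
static int node_buffer_demo(unsigned long size, int nid)
{
	void *buf = vzalloc_node(size, nid);    /* node-aware, zero-filled */

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	vfree(buf);
	return 0;
}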
hugetlb.c
    30    #include <linux/node.h>
    625   * node for alloc or free.
    645   * returns the previously saved node ["this node"] from which to
    647   * next node from which to allocate, handling wrap at end of node
    692   * node ["this node"] from which to free a huge page. Advance the
    693   * next node id whether or not we find a free huge page to free so
    694   * that the next attempt to free addresses the next node
    1647  hugetlb_unregister_node(struct node *node)    [argument]
    1689  hugetlb_register_node(struct node *node)    [argument]
    1727  struct node *node = &node_devices[nid];    [local]
    1874  int node;    [local]
    [all...]
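
The saved-node round robin described at lines 645-694 amounts to "hand out the saved node, then advance with wraparound"; a self-contained model using a plain array in place of the allowed-node mask:

#include <stdio.h>

/* Hand out the previously saved node ("this node") and advance the
 * saved index, wrapping at the end of the allowed-node set. */
static int next_node_to_alloc(int *saved, const int *allowed, int nnodes)
{
	int nid = allowed[*saved];         /* "this node" */

	*saved = (*saved + 1) % nnodes;    /* advance, wrapping at the end */
	return nid;
}

int main(void)
{
	int allowed[] = { 0, 1, 3 };    /* hypothetical allowed nodes */
	int saved = 0;

	for (int i = 0; i < 5; i++)
		printf("allocate on node %d\n",
		       next_node_to_alloc(&saved, allowed, 3));
	return 0;    /* prints 0 1 3 0 1: round robin with wrap */
}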
vmscan.c
    2524  * take care of from where we get pages. So the node where we start the
    2525  * scan does not need to be the current node.
    2567  * pgdat_balanced is used when checking if a node is balanced for high-order
    2571  * for the node to be considered balanced. Forcing all zones to be balanced
    2578  * would need to be at least 256M for it to be balance a whole node.
    2580  * to balance a node on its own. These seemed like reasonable ratios.
    2644  * For kswapd, balance_pgdat() will work across all this node's zones until
    2868  * order-0: All zones must meet high watermark for a balanced node
    2869  * high-order: Balanced zones must make up at least 25% of the node
    2870  *             for the node to be balanced
    3609  scan_unevictable_register_node(struct node *node)    [argument]
    3614  scan_unevictable_unregister_node(struct node *node)    [argument]
    [all...]
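
The balance rule quoted at lines 2868-2870 can be stated in a few lines; a standalone model (zone_model and node_balanced() are invented names, and the kernel's classzone_idx filtering is omitted):

#include <stdbool.h>

struct zone_model {
	unsigned long present_pages;
	bool meets_high_wmark;
};

/* order-0 needs every zone at its high watermark; high-order only
 * needs the balanced zones to cover at least 25% of the node. */
static bool node_balanced(const struct zone_model *zones, int nr_zones,
			  int order)
{
	unsigned long total = 0, balanced = 0;

	for (int i = 0; i < nr_zones; i++) {
		total += zones[i].present_pages;
		if (zones[i].meets_high_wmark)
			balanced += zones[i].present_pages;
		else if (order == 0)
			return false;    /* one unbalanced zone fails the node */
	}
	return balanced * 4 >= total;    /* high-order: >= 25% of the node */
}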
slab.c
    84    * Modified the slab allocator to be node aware on NUMA systems.
    85    * Each node has its own list of partial, free and full slabs.
    86    * All object allocations for a node occur from node specific slab lists.
    281   unsigned int colour_next; /* Per-node cache coloring */
    283   struct array_cache *shared; /* shared per node */
    290   * Need this for bootstrapping a per node allocator.
    301   int node);
    624   * The lock annotation will be lost if all cpus of a node goes down and
    662   static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)    [argument]
    669   int node;    [local]
    696   int node;    [local]
    710   slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)    [argument]
    881   int node;    [local]
    892   int node = __this_cpu_read(slab_reap_node);    [local]
    929   alloc_arraycache(int node, int entries, int batchcount, gfp_t gfp)    [argument]
    982   alloc_alien_cache(int node, int limit, gfp_t gfp)    [argument]
    1013  alloc_alien_cache(int node, int limit, gfp_t gfp)    [argument]
    1049  __drain_alien_cache(struct kmem_cache *cachep, struct array_cache *ac, int node)    [argument]
    1075  int node = __this_cpu_read(slab_reap_node);    [local]
    1110  int node;    [local]
    1150  init_cache_nodelists_node(int node)    [argument]
    1191  int node = cpu_to_mem(cpu);    [local]
    1256  int node = cpu_to_mem(cpu);    [local]
    1400  drain_cache_nodelists_node(int node)    [argument]
    1483  int node;    [local]
    1504  int node;    [local]
    2177  int node;    [local]
    2531  check_spinlock_acquired_node(struct kmem_cache *cachep, int node)    [argument]
    2554  int node = numa_mem_id();    [local]
    2567  int node;    [local]
    3079  int node;    [local]
    3574  free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, int node)    [argument]
    3622  int node = numa_mem_id();    [local]
    3772  __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)    [argument]
    3783  __kmalloc_node(size_t size, gfp_t flags, int node)    [argument]
    3790  __kmalloc_node_track_caller(size_t size, gfp_t flags, int node, unsigned long caller)    [argument]
    3797  __kmalloc_node(size_t size, gfp_t flags, int node)    [argument]
    3917  int node;    [local]
    4120  drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, struct array_cache *ac, int force, int node)    [argument]
    4160  int node = numa_mem_id();    [local]
    4268  int node;    [local]
    4515  int node;    [local]
    [all...]
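
Lines 84-86 describe per-node slab lists; a toy model of that bookkeeping with singly linked lists (the kernel uses list_head plus a per-node spinlock, omitted here; all names are invented):

#include <stddef.h>

struct slab_model {
	struct slab_model *next;
	unsigned int inuse, capacity;    /* objects in use / total objects */
};

/* Each node keeps its own partial, full and free slab lists. */
struct node_lists {
	struct slab_model *partial, *full, *free;
};

/* File a slab on the list matching its fill state after an alloc/free. */
static void file_slab(struct node_lists *n, struct slab_model *s)
{
	struct slab_model **list;

	if (s->inuse == 0)
		list = &n->free;
	else if (s->inuse == s->capacity)
		list = &n->full;
	else
		list = &n->partial;

	s->next = *list;
	*list = s;
}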
memcontrol.c
    143   struct rb_node tree_node; /* RB tree node */
    564   int node, zone;    [local]
    568   for_each_node(node) {
    570   mz = mem_cgroup_zoneinfo(memcg, node, zone);
    571   mctz = soft_limit_tree_node_zone(node, zone);
    591   * Remove the node now but someone else can add it back,
    1479  * @nid: the node ID to be checked.
    1483  * reclaimable pages on a node. Returns true if there are any reclaimable
    1484  * pages in the node.
    1502  * list or the wrong list here, we can start from some node and
    1545  int node;    [local]
    3600  mem_cgroup_force_empty_list(struct mem_cgroup *memcg, int node, int zid, enum lru_list lru)    [argument]
    3663  int node, zid, shrink;    [local]
    4719  alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)    [argument]
    4751  free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)    [argument]
    4797  int node;    [local]
    4859  int tmp, node, zone;    [local]
    4895  int node;    [local]
    [all...]
page_alloc.c
    78    DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
    83    * Array of node states.
    1569  * returns a pointer to the allowed node mask (either the current
    1608  * 2) Check that the zones node (obtained from the zonelist_cache
    1630  int n; /* node that zone *z is on */
    1827  * Large machines with many possible nodes should not always dump per-node
    2214  * allowed per node queues are empty and that nodes are
    2397  * of GFP_THISNODE and a memoryless node
    2515  * pages on a node.
    2516  * @nid: the preferred node ID
    2950  find_next_best_node(int node, nodemask_t *used_node_mask)    [argument]
    3002  build_zonelists_in_node_order(pg_data_t *pgdat, int node)    [argument]
    3040  int pos, j, node;    [local]
    3135  int j, node, load;    [local]
    3213  local_memory_node(int node)    [argument]
    3234  int node, local_node;    [local]
    3893  early_pfn_in_nid(unsigned long pfn, int node)    [argument]
    4405  unsigned int node;    [local]
    [all...]
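
find_next_best_node() at line 2950 orders nodes for zonelist fallback; a sketch of the core idea using libnuma's numa_distance() (the kernel heuristic also weighs CPU-less nodes and load, which this model skips; pick_next_best_node() is an invented name):

#include <numa.h>    /* libnuma: link with -lnuma */

/* Among nodes not yet placed in the fallback order, pick the one
 * closest to `node` by NUMA distance and mark it used. */
static int pick_next_best_node(int node, int *used, int nr_nodes)
{
	int best = -1, best_dist = 1 << 30;

	for (int n = 0; n < nr_nodes; n++) {
		if (used[n])
			continue;
		int d = numa_distance(node, n);
		if (d < best_dist) {
			best_dist = d;
			best = n;
		}
	}
	if (best >= 0)
		used[best] = 1;    /* don't pick it again next round */
	return best;
}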
slub.c
    3     * objects in per cpu and per node lists.
    38    * 2. node->list_lock
    59    * The list_lock protects the partial and full list on each node and
    128   * - Variable sizing of the per node arrays
    244   static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)    [argument]
    246   return s->node[node];
    998   static inline unsigned long slabs_node(struct kmem_cache *s, int node)    [argument]
    1000  struct kmem_cache_node *n = get_node(s, node);
    1010  static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)    [argument]
    1025  dec_slabs_node(struct kmem_cache *s, int node, int objects)    [argument]
    1244  slabs_node(struct kmem_cache *s, int node)    [argument]
    1248  inc_slabs_node(struct kmem_cache *s, int node, int objects)    [argument]
    1250  dec_slabs_node(struct kmem_cache *s, int node, int objects)    [argument]
    1266  alloc_slab_page(gfp_t flags, int node, struct kmem_cache_order_objects oo)    [argument]
    1279  allocate_slab(struct kmem_cache *s, gfp_t flags, int node)    [argument]
    1350  new_slab(struct kmem_cache *s, gfp_t flags, int node)    [argument]
    1631  get_partial(struct kmem_cache *s, gfp_t flags, int node, struct kmem_cache_cpu *c)    [argument]
    2030  node_match(struct kmem_cache_cpu *c, int node)    [argument]
    2070  int node;    [local]
    2102  new_slab_objects(struct kmem_cache *s, gfp_t flags, int node, struct kmem_cache_cpu **pc)    [argument]
    2178  __slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long addr, struct kmem_cache_cpu *c)    [argument]
    2277  slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long addr)    [argument]
    2372  kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)    [argument]
    2384  kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size)    [argument]
    2783  early_kmem_cache_node_alloc(int node)    [argument]
    2818  int node;    [local]
    2832  int node;    [local]
    3141  int node;    [local]
    3333  kmalloc_large_node(size_t size, gfp_t flags, int node)    [argument]
    3347  __kmalloc_node(size_t size, gfp_t flags, int node)    [argument]
    3463  int node;    [local]
    3647  int node;    [local]
    4014  __kmalloc_node_track_caller(size_t size, gfp_t gfpflags, int node, unsigned long caller)    [argument]
    4128  int node;    [local]
    4292  int node;    [local]
    4455  int node;    [local]
    4470  int node = ACCESS_ONCE(c->node);    [local]
    4547  int node;    [local]
    5429  int node;    [local]
    [all...]