Searched defs:nid (Results 1 - 24 of 24) sorted by relevance

/mm/
list_lru.c
15 int nid = page_to_nid(virt_to_page(item)); local
16 struct list_lru_node *nlru = &lru->node[nid];
23 node_set(nid, lru->active_nodes);
34 int nid = page_to_nid(virt_to_page(item)); local
35 struct list_lru_node *nlru = &lru->node[nid];
41 node_clear(nid, lru->active_nodes);
52 list_lru_count_node(struct list_lru *lru, int nid) argument
55 struct list_lru_node *nlru = &lru->node[nid];
67 list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate, argument
71 struct list_lru_node *nlru = &lru->node[nid];
[all...]
mmzone.c
19 int nid = next_online_node(pgdat->node_id); local
21 if (nid == MAX_NUMNODES)
23 return NODE_DATA(nid);
mm_init.c
26 int nid; local
31 for_each_online_node(nid) {
32 pg_data_t *pgdat = NODE_DATA(nid);
51 listid > 0 ? "thisnode" : "general", nid,
134 unsigned long nid, unsigned long pfn)
136 BUG_ON(page_to_nid(page) != nid);
133 mminit_verify_page_links(struct page *page, enum zone_type zone, unsigned long nid, unsigned long pfn) argument
sparse-vmemmap.c
179 struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid) argument
189 if (vmemmap_populate(start, end, nid))
workingset.c
172 int zid, nid; local
177 nid = entry & ((1UL << NODES_SHIFT) - 1);
181 *zone = NODE_DATA(nid)->node_zones + zid;
278 shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid);
281 pages = node_present_pages(sc->nid);
379 ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,
nobootmem.c
35 static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, argument
44 addr = memblock_find_in_range_node(size, align, goal, limit, nid);
oom_kill.c
206 int nid; local
228 for_each_node_mask(nid, *nodemask)
229 *totalpages += node_spanned_pages(nid);
241 for_each_node_mask(nid, cpuset_current_mems_allowed)
242 *totalpages += node_spanned_pages(nid);
internal.h
342 enum zone_type zone, unsigned long nid, unsigned long pfn);
357 enum zone_type zone, unsigned long nid, unsigned long pfn)
356 mminit_verify_page_links(struct page *page, enum zone_type zone, unsigned long nid, unsigned long pfn) argument
page_cgroup.c
45 static int __init alloc_node_page_cgroup(int nid) argument
51 nr_pages = NODE_DATA(nid)->node_spanned_pages;
59 BOOTMEM_ALLOC_ACCESSIBLE, nid);
62 NODE_DATA(nid)->node_page_cgroup = base;
70 int nid, fail; local
75 for_each_online_node(nid) {
76 fail = alloc_node_page_cgroup(nid);
109 static void *__meminit alloc_page_cgroup(size_t size, int nid) argument
114 addr = alloc_pages_exact_nid(nid, size, flags);
120 if (node_state(nid, N_HIGH_MEMORY))
128 init_section_page_cgroup(unsigned long pfn, int nid) argument
192 online_page_cgroup(unsigned long start_pfn, unsigned long nr_pages, int nid) argument
227 offline_page_cgroup(unsigned long start_pfn, unsigned long nr_pages, int nid) argument
274 int nid; local
[all...]
sparse.c
51 static void set_section_nid(unsigned long section_nr, int nid) argument
53 section_to_node_table[section_nr] = nid;
56 static inline void set_section_nid(unsigned long section_nr, int nid) argument
62 static struct mem_section noinline __init_refok *sparse_index_alloc(int nid) argument
69 if (node_state(nid, N_HIGH_MEMORY))
70 section = kzalloc_node(array_size, GFP_KERNEL, nid);
74 section = memblock_virt_alloc_node(array_size, nid);
80 static int __meminit sparse_index_init(unsigned long section_nr, int nid) argument
88 section = sparse_index_alloc(nid);
97 static inline int sparse_index_init(unsigned long section_nr, int nid) argument
133 sparse_encode_early_nid(int nid) argument
170 memory_present(int nid, unsigned long start, unsigned long end) argument
194 node_memmap_size_bytes(int nid, unsigned long start_pfn, unsigned long end_pfn) argument
269 int nid; local
294 check_usemap_section_nr(int nid, unsigned long *usemap) argument
340 check_usemap_section_nr(int nid, unsigned long *usemap) argument
372 sparse_mem_map_populate(unsigned long pnum, int nid) argument
453 int nid = sparse_early_nid(ms); local
598 kmalloc_section_memmap(unsigned long pnum, int nid) argument
641 kmalloc_section_memmap(unsigned long pnum, int nid) argument
[all...]
compaction.c
1449 static void compact_node(int nid) argument
1457 __compact_pgdat(NODE_DATA(nid), &cc);
1463 int nid; local
1468 for_each_online_node(nid)
1469 compact_node(nid);
1498 int nid = dev->id; local
1500 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1504 compact_node(nid);
[all...]
memblock.c
109 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
118 phys_addr_t size, phys_addr_t align, int nid)
123 for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
141 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
150 phys_addr_t size, phys_addr_t align, int nid)
155 for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
176 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
117 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align, int nid) argument
149 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align, int nid) argument
191 memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid) argument
465 memblock_insert_region(struct memblock_type *type, int idx, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
498 memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
577 memblock_add_node(phys_addr_t base, phys_addr_t size, int nid) argument
697 memblock_reserve_region(phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
795 __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid) argument
895 __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid) argument
981 __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, unsigned long *out_end_pfn, int *out_nid) argument
1022 memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid) argument
1040 memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid) argument
1067 memblock_alloc_base_nid(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr, int nid) argument
1074 memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) argument
1102 memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) argument
1138 memblock_virt_alloc_internal( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) argument
1219 memblock_virt_alloc_try_nid_nopanic( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) argument
1249 memblock_virt_alloc_try_nid( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) argument
[all...]
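
The memblock.c kernel-doc above, together with the nobootmem.c hit (`memblock_find_in_range_node(size, align, goal, limit, nid)`), documents a node-aware free-range search. A minimal sketch of that calling pattern follows; the wrapper name alloc_on_node and its NULL-on-failure policy are illustrative assumptions, not kernel code:

/* Sketch only: find a free range on the requested node, then reserve
 * it, mirroring the memblock_find_in_range_node() hit in nobootmem.c.
 * Wrapper name and failure handling are assumptions. */
static void * __init alloc_on_node(int nid, phys_addr_t size,
				   phys_addr_t align,
				   phys_addr_t goal, phys_addr_t limit)
{
	phys_addr_t addr;

	/* nid may be NUMA_NO_NODE to accept any node, per the doc above */
	addr = memblock_find_in_range_node(size, align, goal, limit, nid);
	if (!addr)
		return NULL;

	memblock_reserve(addr, size);
	return phys_to_virt(addr);
}
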
memory-failure.c
247 int nid = page_to_nid(p); local
252 node_set(nid, shrink.nodes_to_scan);
1467 int nid = page_to_nid(p); local
1470 nid);
1472 return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
memory_hotplug.c
329 int nid = zone->zone_pgdat->node_id; local
333 set_page_links(pfn_to_page(pfn), zid, nid, pfn); local
447 int nid = pgdat->node_id; local
462 memmap_init_zone(nr_pages, nid, zone_type,
467 static int __meminit __add_section(int nid, struct zone *zone, argument
485 return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
494 int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, argument
505 err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
523 static int find_smallest_section_pfn(int nid, struct zone *zone, argument
535 if (unlikely(pfn_to_nid(start_pfn) != nid))
548 find_biggest_section_pfn(int nid, struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) argument
583 int nid = zone_to_nid(zone); local
651 int nid = pgdat->node_id; local
891 int nid = zone_to_nid(zone); local
968 int nid; local
1081 hotadd_new_pgdat(int nid, u64 start) argument
1128 rollback_node_hotadd(int nid, pg_data_t *pgdat) argument
1141 try_online_node(int nid) argument
1193 should_add_memory_movable(int nid, u64 start, u64 size) argument
1208 zone_for_memory(int nid, u64 start, u64 size, int zone_default) argument
1217 add_memory(int nid, u64 start, u64 size) argument
1935 try_offline_node(int nid) argument
1999 remove_memory(int nid, u64 start, u64 size) argument
[all...]
migrate.c
1585 int nid = (int) data; local
1588 newpage = alloc_pages_exact_node(nid,
huge_memory.c
2253 static bool khugepaged_scan_abort(int nid) argument
2265 if (khugepaged_node_load[nid])
2271 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2281 int nid, target_node = 0, max_value = 0; local
2284 for (nid = 0; nid < MAX_NUMNODES; nid++)
2285 if (khugepaged_node_load[nid] > max_value) {
2286 max_value = khugepaged_node_load[nid];
2287 target_node = nid;
[all...]
ksm.c
131 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
132 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
145 int nid; member in struct:stable_node
153 * @nid: NUMA node id of unstable tree in which linked (may not match page)
166 int nid; /* when node of unstable tree */ member in union:rmap_item::__anon6
513 root_stable_tree + NUMA(stable_node->nid));
649 root_unstable_tree + NUMA(rmap_item->nid));
744 int nid; local
747 for (nid = 0; nid < ksm_nr_node_ids; nid++)
1153 int nid; local
1253 int nid; local
1329 int nid; local
1541 int nid; local
1997 int nid; local
[all...]
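
The ksm.c comments above note that both stable-tree and unstable-tree entries record the NUMA node they are linked under, and the hits at lines 513 and 649 index a per-node tree array with that id. A hedged restatement of that indexing (the helper name is illustrative; per ksm.c, NUMA() collapses to 0 on non-NUMA builds so a single tree is used):

/* Illustrative helper: pick the stable tree for a node's recorded nid,
 * as in the "root_stable_tree + NUMA(stable_node->nid)" hit above. */
static struct rb_root *stable_tree_for(struct stable_node *stable_node)
{
	return root_stable_tree + NUMA(stable_node->nid);
}
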
mempolicy.c
496 int nid; local
509 nid = page_to_nid(page);
510 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
527 int nid; local
537 nid = page_to_nid(page);
538 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
1729 unsigned nid, next; local
1732 nid = me->il_next;
1733 next = next_node(nid, policy->v.nodes);
1738 return nid;
1794 int nid = NUMA_NO_NODE; local
1899 int nid; local
1976 alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) argument
2025 unsigned nid; local
2591 int nid, prefer = 0; local
[all...]
vmscan.c
285 int nid = shrinkctl->nid; local
298 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
378 &shrinker->nr_deferred[nid]);
380 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
382 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
428 shrinkctl->nid = 0;
434 for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
435 if (node_online(shrinkctl->nid))
2811 int nid; local
3497 int nid; local
3518 kswapd_run(int nid) argument
3541 kswapd_stop(int nid) argument
3553 int nid; local
[all...]
hugetlb.c
517 int nid = page_to_nid(page); local
518 list_move(&page->lru, &h->hugepage_freelists[nid]);
520 h->free_huge_pages_node[nid]++;
523 static struct page *dequeue_huge_page_node(struct hstate *h, int nid) argument
527 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
534 if (&h->hugepage_freelists[nid] == &page->lru)
539 h->free_huge_pages_node[nid]--;
616 static int next_node_allowed(int nid, nodemask_t *nodes_allowed) argument
618 nid = next_node(nid, *nodes_allowed);
626 get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) argument
642 int nid; local
660 int nid; local
744 alloc_gigantic_page(int nid, unsigned order) argument
782 alloc_fresh_gigantic_page_node(struct hstate *h, int nid) argument
865 int nid = page_to_nid(page); local
897 prep_new_huge_page(struct hstate *h, struct page *page, int nid) argument
983 alloc_fresh_huge_page_node(struct hstate *h, int nid) argument
1071 int nid = page_to_nid(page); local
1103 alloc_buddy_huge_page(struct hstate *h, int nid) argument
1185 alloc_huge_page_node(struct hstate *h, int nid) argument
1728 int nid; local
1739 __nr_hugepages_store_common(bool obey_mempolicy, struct hstate *h, int nid, unsigned long count, size_t len) argument
1787 int nid; local
1866 int nid; local
1891 int nid; local
1989 int nid; local
2036 int nid; local
2088 int nid; local
2349 hugetlb_report_node_meminfo(int nid, char *buf) argument
2366 int nid; local
3701 int nid = page_to_nid(hpage); local
3716 int nid = page_to_nid(hpage); local
[all...]
slab.c
1313 int nid; local
1315 nid = mnb->status_change_nid;
1316 if (nid < 0)
1322 ret = init_cache_node_node(nid);
1327 ret = drain_cache_node_node(nid);
1448 int nid; local
1450 for_each_online_node(nid) {
1451 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1454 &init_kmem_cache_node[SIZE_NODE + nid], nid);
2995 int nid; local
[all...]
memcontrol.c
672 int nid = zone_to_nid(zone); local
675 return &memcg->nodeinfo[nid]->zoneinfo[zid];
686 int nid = page_to_nid(page); local
689 return &memcg->nodeinfo[nid]->zoneinfo[zid];
693 soft_limit_tree_node_zone(int nid, int zid) argument
695 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
701 int nid = page_to_nid(page); local
704 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
798 int nid, zid; local
800 for_each_node(nid) {
939 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, int nid, unsigned int lru_mask) argument
966 int nid; local
1804 test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, int nid, bool noswap) argument
1826 int nid; local
1892 int nid; local
4346 int nid; local
4447 int nid, zid; local
[all...]
page_alloc.c
2937 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) argument
2944 page = alloc_pages_node(nid, gfp_mask, order);
3008 * @nid: the preferred node ID where memory should be allocated
3012 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3017 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) argument
3020 struct page *p = alloc_pages_node(nid, gfp_mask, order);
3117 void si_meminfo_node(struct sysinfo *val, int nid) argument
3121 pg_data_t *pgdat = NODE_DATA(nid);
3126 val->sharedram = node_page_state(nid, NR_SHMEM);
3127 val->freeram = node_page_state(nid, NR_FREE_PAGES);
3144 skip_free_areas_node(unsigned int flags, int nid) argument
3821 int nid; local
4102 memmap_init_zone(unsigned long size, int nid, unsigned long zone, unsigned long start_pfn, enum memmap_context context) argument
4421 int nid; local
4445 int nid; local
4457 int nid; local
4475 free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) argument
4498 sparse_memory_present_with_active_regions(int nid) argument
4518 get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn) argument
4567 adjust_zone_range_for_zone_movable(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *zone_start_pfn, unsigned long *zone_end_pfn) argument
4597 zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *ignored) argument
4628 __absent_pages_in_range(int nid, unsigned long range_start_pfn, unsigned long range_end_pfn) argument
4658 zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *ignored) argument
4678 zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *zones_size) argument
4687 zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *zholes_size) argument
4836 int nid = pgdat->node_id; local
4972 free_area_init_node(int nid, unsigned long *zones_size, unsigned long node_start_pfn, unsigned long *zholes_size) argument
5044 int i, nid; local
5071 find_min_pfn_for_node(int nid) argument
5109 int i, nid; local
5129 int i, nid; local
5290 check_for_memory(pg_data_t *pgdat, int nid) argument
5325 int i, nid; local
[all...]
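
The alloc_pages_exact_nid() doc above describes a node-preferring variant of alloc_pages_exact(). A hedged usage sketch, where the 16 KB size and node 0 are made-up values and free_pages_exact() is assumed as the matching release path:

/* Usage sketch for the node-preferring allocator documented above;
 * size and nid values here are illustrative only. */
void *buf = alloc_pages_exact_nid(0, 16 * 1024, GFP_KERNEL);
if (buf) {
	/* ... use the buffer ... */
	free_pages_exact(buf, 16 * 1024);
}
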
slub.c
2127 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) argument
2139 nid, gfpflags);
3460 int nid = marg->status_change_nid_normal; local
3467 if (nid < 0)
3488 s->node[nid] = n;

Completed in 230 milliseconds