memory_hotplug.c revision 6811378e7d8b9aa4fca2a1ca73d24c9d67c9cb12
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>

extern void zonetable_add(struct zone *zone, int nid, int zid,
			  unsigned long pfn, unsigned long size);

/*
 * Hook one new section's worth of pages into @zone: initialise the zone
 * if it was empty, set up the section's struct pages, and add the
 * section to the zone table.
 */
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret = init_currently_empty_zone(zone, phys_start_pfn,
						    nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
	return 0;
}

extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);

/*
 * Add one memory section: allocate its memmap via sparsemem, hook it
 * into @zone and register it with the memory driver core.
 */
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		/*
		 * We want to keep adding the rest of the
		 * sections if the first ones already exist.
		 */
		if (err && (err != -EEXIST))
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/*
 * Extend @zone's spanned range to cover [start_pfn, end_pfn).  Called
 * under the pgdat resize lock; takes the zone span seqlock for writing.
 */
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

/*
 * Extend the node's spanned range to cover [start_pfn, end_pfn).
 * Called under the pgdat resize lock.
 */
static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

/*
 * Bring nr_pages pages starting at pfn online: grow the zone and node
 * spans, hand each page to the buddy allocator via online_page(), and
 * update the watermarks and, if needed, the zonelists.
 */
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, then it is not in the zonelists.
	 * This means the page allocator ignores this zone.
	 * So, the zonelists must be rebuilt after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn + i);
		online_page(page);
		onlined_pages++;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();

	return 0;
}
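
The comment above __add_pages() says architecture code is expected to choose the target zone before calling it. Below is a minimal sketch of such a caller; the function name, the nid parameter, and the ZONE_NORMAL placement policy are illustrative assumptions, not part of this file.

/*
 * Sketch of an arch-level hotplug entry point.  Everything here is an
 * assumption for illustration: a real arch implementation must also set
 * up the kernel mapping for the new range before the pages are used.
 */
int arch_add_memory_sketch(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	/* Assumed policy: place hot-added memory in ZONE_NORMAL. */
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* __add_pages() adds the range one SPARSEMEM section at a time. */
	return __add_pages(zone, start_pfn, nr_pages);
}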