memory_hotplug.c revision 180c06efce691f2b721dd0d965079827bdd7ee03
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>

/* add this memory to the iomem resource tree */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

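/*
 * Undo register_memory_resource(): remove the region from the iomem
 * resource tree and free the struct resource itself.
 */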
static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
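/*
 * Ensure the zone backing the new section is ready: initialize the zone
 * (wait table etc.) if it is still empty, then initialize the memmap
 * for the newly added pages.
 */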
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

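/*
 * Add a single sparsemem section starting at phys_start_pfn: allocate
 * the section's memmap, hook it into the zone, and register a memory
 * block with the driver core so it shows up in sysfs.  Returns -EEXIST
 * if the first pfn is already backed by a valid section.
 */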
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

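/*
 * Tear down a single section: remove its sysfs memory block, then drop
 * the section's memmap under the pgdat resize lock.
 */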
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* when initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;

	/*
	 * We can only remove entire sections.
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION;
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

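/*
 * Extend the zone's span to cover [start_pfn, end_pfn).  Called with the
 * pgdat resize lock held; zone_span_writelock() additionally guards the
 * span against concurrent readers.
 */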
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

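/*
 * As above, but for the node: extend the pgdat's span to cover
 * [start_pfn, end_pfn).  Also called under the pgdat resize lock.
 */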
static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

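/*
 * Hand one hot-added page over to the page allocator: update the global
 * page accounting, clear PG_reserved, and free it into the buddy lists.
 */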
void online_page(struct page *page)
{
	totalram_pages++;
	num_physpages++;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif

#ifdef CONFIG_FLATMEM
	max_mapnr = max(page_to_pfn(page), max_mapnr);
#endif

	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

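/*
 * walk_memory_resource() callback: if the chunk still needs onlining
 * (its first page is marked reserved), online every page in it and
 * accumulate the count of onlined pages through *arg.
 */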
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			online_page(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

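/*
 * Bring [pfn, pfn + nr_pages) online: notify MEM_GOING_ONLINE listeners,
 * grow the zone/node spans, free the pages into the buddy allocator, and
 * rebuild the zonelists if the zone was previously unpopulated.  Called
 * with the memory_block state_mutex held (see the comment below), which
 * keeps the underlying sections from being removed underneath us.
 */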
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, it is not in the zonelists,
	 * which means the page allocator ignores it.  So the zonelists
	 * must be rebuilt after the zone becomes populated.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	walk_memory_resource(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

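/*
 * Allocate and minimally initialize the pgdat for a node that is coming
 * online for the first time.  All zones start out empty; the hot-added
 * pages are accounted later, when they are onlined.
 */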
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; we don't have any present pages */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

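/* Undo hotadd_new_pgdat(): detach and free the node's pgdat again. */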
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}

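/*
 * Hot-add a memory range to node @nid: reserve the iomem resource,
 * allocate a pgdat if the node was offline, and let the architecture
 * create the mappings and sections via arch_add_memory().
 */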
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from here on */
	node_set_online(nid);

	cpuset_track_online_nodes();

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file for the new node can't be created,
		 * CPUs on that node can't be hot-added.  There is no way
		 * to roll back at this point, so catch the failure with
		 * BUG_ON(), reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to
 * the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists: scan pfns
 * from start to end and return the first pfn of a page that is on the
 * LRU, or 0 if none is found.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;

	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

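/*
 * Page allocation callback handed to migrate_pages(): allocate the
 * migration target from anywhere in highuser/pagecache-compatible
 * memory.  No attempt is made to stay on the source node.
 */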
static struct page *
hotremove_migrate_alloc(struct page *page,
			unsigned long private,
			int **x)
{
	/* This should be improved!! */
	return alloc_page(GFP_HIGHUSER_PAGECACHE);
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
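/*
 * Migrate up to NR_OFFLINE_AT_ONCE_PAGES LRU pages out of
 * [start_pfn, end_pfn).  Returns 0 on success, the number of pages that
 * failed to migrate, or -EBUSY if the range contains pages we cannot
 * deal with (in use but not on the LRU).
 */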
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages.  And we can only deal with pages
		 * on the LRU.
		 */
		ret = isolate_lru_page(page, &source);
		if (!ret) { /* Success */
			move_pages--;
		} else {
			/*
			 * Because we don't hold the big zone->lock across
			 * the scan, we have to check the count again here.
			 */
			if (page_count(page))
				not_managed++;
#ifdef CONFIG_DEBUG_VM
			printk(KERN_INFO "removing from LRU failed"
					 " %lx/%d/%lx\n",
				pfn, page_count(page), page->flags);
#endif
		}
	}
	ret = -EBUSY;
	if (not_managed) {
		if (!list_empty(&source))
			putback_lru_pages(&source);
		goto out;
	}
	ret = 0;
	if (list_empty(&source))
		goto out;
	/* migrate_pages() returns the number of failed pages */
	ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
	return ret;
}

/*
 * Remove isolated pages from free_area[] and mark them all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range recorded as a memory resource
 * are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = nr_pages;

	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

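/*
 * Returns the total number of isolated pages in [start_pfn, end_pfn),
 * or a negative error code if any chunk failed the isolation check.
 */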
static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

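/*
 * Offline [start_pfn, end_pfn): isolate the range, migrate the LRU pages
 * out of it (retrying until @timeout jiffies have passed), and finally
 * pull the isolated pages out of the free lists and fix up the zone
 * accounting.  The range must be pageblock-aligned and lie within a
 * single zone.
 */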
int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, the range must be aligned to pageblock boundaries */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * Requiring the range to lie within a single zone makes hotplug
	 * much easier (and the code more readable); we assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		return ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have pages on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' LRU pagevecs; this is asynchronous... */
	lru_add_drain_all();
	flush_scheduled_work();
	yield();
	/* drain the per-cpu pages; this is synchronous */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * OK, all of our target range is isolated.  We cannot do a
	 * rollback from this point on.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset the pageblock flags, making the migrate type MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;
	num_physpages -= offlined_pages;

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
		start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* put the isolated pageblocks back; pages return to the free area */
	undo_isolate_page_range(start_pfn, end_pfn);

	return ret;
}
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
679