migrate.c revision 98837c7f82ef78aa38f40462aa2fcac68fd3acbf
1/*
2 * Memory Migration functionality - linux/mm/migration.c
3 *
4 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
5 *
6 * Page migration was first developed in the context of the memory hotplug
7 * project. The main authors of the migration code are:
8 *
9 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
10 * Hirokazu Takahashi <taka@valinux.co.jp>
11 * Dave Hansen <haveblue@us.ibm.com>
12 * Christoph Lameter <clameter@sgi.com>
13 */
14
15#include <linux/migrate.h>
16#include <linux/module.h>
17#include <linux/swap.h>
18#include <linux/swapops.h>
19#include <linux/pagemap.h>
20#include <linux/buffer_head.h>
21#include <linux/mm_inline.h>
22#include <linux/nsproxy.h>
23#include <linux/pagevec.h>
24#include <linux/rmap.h>
25#include <linux/topology.h>
26#include <linux/cpu.h>
27#include <linux/cpuset.h>
28#include <linux/writeback.h>
29#include <linux/mempolicy.h>
30#include <linux/vmalloc.h>
31#include <linux/security.h>
32#include <linux/memcontrol.h>
33
34#include "internal.h"
35
36#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
37
38/*
39 * Isolate one page from the LRU lists. If successful, put it onto
40 * the indicated list with an elevated page count.
41 *
42 * Result:
43 *  -EBUSY: page not on LRU list
44 *  0: page removed from LRU list and added to the specified list.
45 */
46int isolate_lru_page(struct page *page, struct list_head *pagelist)
47{
48	int ret = -EBUSY;
49
50	if (PageLRU(page)) {
51		struct zone *zone = page_zone(page);
52
53		spin_lock_irq(&zone->lru_lock);
54		if (PageLRU(page) && get_page_unless_zero(page)) {
55			ret = 0;
56			ClearPageLRU(page);
57			if (PageActive(page))
58				del_page_from_active_list(zone, page);
59			else
60				del_page_from_inactive_list(zone, page);
61			list_add_tail(&page->lru, pagelist);
62		}
63		spin_unlock_irq(&zone->lru_lock);
64	}
65	return ret;
66}
67
68/*
69 * migrate_prep() needs to be called before we start compiling a list of pages
70 * to be migrated using isolate_lru_page().
71 */
72int migrate_prep(void)
73{
74	/*
75	 * Clear the LRU lists so pages can be isolated.
76	 * Note that pages may be moved off the LRU after we have
77	 * drained them. Those pages will fail to migrate like other
78	 * pages that may be busy.
79	 */
80	lru_add_drain_all();
81
82	return 0;
83}
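
/*
 * Editor's sketch (not part of this revision): the calling sequence these
 * helpers assume. The allocator callback alloc_dst_page() and the target
 * node nid are hypothetical; new_page_node() further down is a real
 * example of such a callback. Note that migrate_pages() itself puts back
 * any pages that could not be migrated.
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (isolate_lru_page(page, &pagelist) == 0)
 *		migrate_pages(&pagelist, alloc_dst_page, nid);
 */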
84
85static inline void move_to_lru(struct page *page)
86{
87	if (PageActive(page)) {
88		/*
89		 * lru_cache_add_active checks that
90		 * the PG_active bit is off.
91		 */
92		ClearPageActive(page);
93		lru_cache_add_active(page);
94	} else {
95		lru_cache_add(page);
96	}
97	put_page(page);
98}
99
100/*
101 * Add isolated pages on the list back to the LRU.
102 *
103 * returns the number of pages put back.
104 */
105int putback_lru_pages(struct list_head *l)
106{
107	struct page *page;
108	struct page *page2;
109	int count = 0;
110
111	list_for_each_entry_safe(page, page2, l, lru) {
112		list_del(&page->lru);
113		move_to_lru(page);
114		count++;
115	}
116	return count;
117}
118
119/*
120 * Restore a potential migration pte to a working pte entry
121 */
122static void remove_migration_pte(struct vm_area_struct *vma,
123		struct page *old, struct page *new)
124{
125	struct mm_struct *mm = vma->vm_mm;
126	swp_entry_t entry;
127 	pgd_t *pgd;
128 	pud_t *pud;
129 	pmd_t *pmd;
130	pte_t *ptep, pte;
131 	spinlock_t *ptl;
132	unsigned long addr = page_address_in_vma(new, vma);
133
134	if (addr == -EFAULT)
135		return;
136
137 	pgd = pgd_offset(mm, addr);
138	if (!pgd_present(*pgd))
139		return;
140
141	pud = pud_offset(pgd, addr);
142	if (!pud_present(*pud))
143		return;
144
145	pmd = pmd_offset(pud, addr);
146	if (!pmd_present(*pmd))
147		return;
148
149	ptep = pte_offset_map(pmd, addr);
150
151	if (!is_swap_pte(*ptep)) {
152		pte_unmap(ptep);
153 		return;
154 	}
155
156 	ptl = pte_lockptr(mm, pmd);
157 	spin_lock(ptl);
158	pte = *ptep;
159	if (!is_swap_pte(pte))
160		goto out;
161
162	entry = pte_to_swp_entry(pte);
163
164	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
165		goto out;
166
167	/*
168	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
169	 * Failure is not an option here: we're now expected to remove every
170	 * migration pte, and will cause crashes otherwise.  Normally this
171	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
172	 * page_cgroup count for safety, that's now attached to the new page,
173	 * so this charge should just be another increment of the count,
174	 * to keep in balance with rmap.c's mem_cgroup_uncharging.  But if
175	 * there's been a force_empty, those reference counts may no longer
176	 * be reliable, and this charge can actually fail: oh well, we don't
177	 * make the situation any worse by proceeding as if it had succeeded.
178	 */
179	mem_cgroup_charge(new, mm, GFP_ATOMIC);
180
181	get_page(new);
182	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
183	if (is_write_migration_entry(entry))
184		pte = pte_mkwrite(pte);
185	flush_cache_page(vma, addr, pte_pfn(pte));
186	set_pte_at(mm, addr, ptep, pte);
187
188	if (PageAnon(new))
189		page_add_anon_rmap(new, vma, addr);
190	else
191		page_add_file_rmap(new);
192
193	/* No need to invalidate - it was non-present before */
194	update_mmu_cache(vma, addr, pte);
195
196out:
197	pte_unmap_unlock(ptep, ptl);
198}
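
/*
 * Editor's sketch: the migration ptes removed above are created by the
 * try_to_unmap() path in mm/rmap.c, roughly like this (paraphrased, not
 * a verbatim quote of that file):
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 */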
199
200/*
201 * Note that remove_file_migration_ptes() will only work on regular mappings;
202 * nonlinear mappings do not use migration entries.
203 */
204static void remove_file_migration_ptes(struct page *old, struct page *new)
205{
206	struct vm_area_struct *vma;
207	struct address_space *mapping = page_mapping(new);
208	struct prio_tree_iter iter;
209	pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
210
211	if (!mapping)
212		return;
213
214	spin_lock(&mapping->i_mmap_lock);
215
216	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
217		remove_migration_pte(vma, old, new);
218
219	spin_unlock(&mapping->i_mmap_lock);
220}
221
222/*
223 * Must hold mmap_sem lock on at least one of the vmas containing
224 * the page so that the anon_vma cannot vanish.
225 */
226static void remove_anon_migration_ptes(struct page *old, struct page *new)
227{
228	struct anon_vma *anon_vma;
229	struct vm_area_struct *vma;
230	unsigned long mapping;
231
232	mapping = (unsigned long)new->mapping;
233
234	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
235		return;
236
237	/*
238	 * We hold the mmap_sem lock, so there is no need to call page_lock_anon_vma().
239	 */
240	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
241	spin_lock(&anon_vma->lock);
242
243	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
244		remove_migration_pte(vma, old, new);
245
246	spin_unlock(&anon_vma->lock);
247}
248
249/*
250 * Get rid of all migration entries and replace them by
251 * references to the indicated page.
252 */
253static void remove_migration_ptes(struct page *old, struct page *new)
254{
255	if (PageAnon(new))
256		remove_anon_migration_ptes(old, new);
257	else
258		remove_file_migration_ptes(old, new);
259}
260
261/*
262 * Something used the pte of a page under migration. We need to
263 * get to the page and wait until migration is finished.
264 * When we return from this function the fault will be retried.
265 *
266 * This function is called from do_swap_page().
267 */
268void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
269				unsigned long address)
270{
271	pte_t *ptep, pte;
272	spinlock_t *ptl;
273	swp_entry_t entry;
274	struct page *page;
275
276	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
277	pte = *ptep;
278	if (!is_swap_pte(pte))
279		goto out;
280
281	entry = pte_to_swp_entry(pte);
282	if (!is_migration_entry(entry))
283		goto out;
284
285	page = migration_entry_to_page(entry);
286
287	get_page(page);
288	pte_unmap_unlock(ptep, ptl);
289	wait_on_page_locked(page);
290	put_page(page);
291	return;
292out:
293	pte_unmap_unlock(ptep, ptl);
294}
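
/*
 * Editor's sketch: roughly how the do_swap_page() caller in mm/memory.c
 * reaches this function (paraphrased, not a verbatim quote of that file):
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(mm, pmd, address);
 *		goto out;
 *	}
 */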
295
296/*
297 * Replace the page in the mapping.
298 *
299 * The number of remaining references must be:
300 * 1 for anonymous pages without a mapping
301 * 2 for pages with a mapping
302 * 3 for pages with a mapping and PagePrivate set.
303 */
304static int migrate_page_move_mapping(struct address_space *mapping,
305		struct page *newpage, struct page *page)
306{
307	void **pslot;
308
309	if (!mapping) {
310		/* Anonymous page without mapping */
311		if (page_count(page) != 1)
312			return -EAGAIN;
313		return 0;
314	}
315
316	write_lock_irq(&mapping->tree_lock);
317
318	pslot = radix_tree_lookup_slot(&mapping->page_tree,
319 					page_index(page));
320
321	if (page_count(page) != 2 + !!PagePrivate(page) ||
322			(struct page *)radix_tree_deref_slot(pslot) != page) {
323		write_unlock_irq(&mapping->tree_lock);
324		return -EAGAIN;
325	}
326
327	/*
328	 * Now we know that no one else is looking at the page.
329	 */
330	get_page(newpage);	/* add cache reference */
331#ifdef CONFIG_SWAP
332	if (PageSwapCache(page)) {
333		SetPageSwapCache(newpage);
334		set_page_private(newpage, page_private(page));
335	}
336#endif
337
338	radix_tree_replace_slot(pslot, newpage);
339
340	/*
341	 * Drop cache reference from old page.
342	 * We know this isn't the last reference.
343	 */
344	__put_page(page);
345
346	/*
347	 * If moved to a different zone then also account
348	 * the page for that zone. Other VM counters will be
349	 * taken care of when we establish references to the
350	 * new page and drop references to the old page.
351	 *
352	 * Note that anonymous pages are accounted for
353	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
354	 * are mapped to swap space.
355	 */
356	__dec_zone_page_state(page, NR_FILE_PAGES);
357	__inc_zone_page_state(newpage, NR_FILE_PAGES);
358
359	write_unlock_irq(&mapping->tree_lock);
360
361	return 0;
362}
363
364/*
365 * Copy the page to its new location
366 */
367static void migrate_page_copy(struct page *newpage, struct page *page)
368{
369	copy_highpage(newpage, page);
370
371	if (PageError(page))
372		SetPageError(newpage);
373	if (PageReferenced(page))
374		SetPageReferenced(newpage);
375	if (PageUptodate(page))
376		SetPageUptodate(newpage);
377	if (PageActive(page))
378		SetPageActive(newpage);
379	if (PageChecked(page))
380		SetPageChecked(newpage);
381	if (PageMappedToDisk(page))
382		SetPageMappedToDisk(newpage);
383
384	if (PageDirty(page)) {
385		clear_page_dirty_for_io(page);
386		set_page_dirty(newpage);
387 	}
388
389#ifdef CONFIG_SWAP
390	ClearPageSwapCache(page);
391#endif
392	ClearPageActive(page);
393	ClearPagePrivate(page);
394	set_page_private(page, 0);
395	page->mapping = NULL;
396
397	/*
398	 * If any waiters have accumulated on the new page then
399	 * wake them up.
400	 */
401	if (PageWriteback(newpage))
402		end_page_writeback(newpage);
403}
404
405/************************************************************
406 *                    Migration functions
407 ***********************************************************/
408
409/* Always fail migration. Used for mappings that are not movable */
410int fail_migrate_page(struct address_space *mapping,
411			struct page *newpage, struct page *page)
412{
413	return -EIO;
414}
415EXPORT_SYMBOL(fail_migrate_page);
416
417/*
418 * Common logic to directly migrate a single page suitable for
419 * pages that do not use PagePrivate.
420 *
421 * Pages are locked upon entry and exit.
422 */
423int migrate_page(struct address_space *mapping,
424		struct page *newpage, struct page *page)
425{
426	int rc;
427
428	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
429
430	rc = migrate_page_move_mapping(mapping, newpage, page);
431
432	if (rc)
433		return rc;
434
435	migrate_page_copy(newpage, page);
436	return 0;
437}
438EXPORT_SYMBOL(migrate_page);
439
440#ifdef CONFIG_BLOCK
441/*
442 * Migration function for pages with buffers. This function can only be used
443 * if the underlying filesystem guarantees that no other references to "page"
444 * exist.
445 */
446int buffer_migrate_page(struct address_space *mapping,
447		struct page *newpage, struct page *page)
448{
449	struct buffer_head *bh, *head;
450	int rc;
451
452	if (!page_has_buffers(page))
453		return migrate_page(mapping, newpage, page);
454
455	head = page_buffers(page);
456
457	rc = migrate_page_move_mapping(mapping, newpage, page);
458
459	if (rc)
460		return rc;
461
462	bh = head;
463	do {
464		get_bh(bh);
465		lock_buffer(bh);
466		bh = bh->b_this_page;
467
468	} while (bh != head);
469
470	ClearPagePrivate(page);
471	set_page_private(newpage, page_private(page));
472	set_page_private(page, 0);
473	put_page(page);
474	get_page(newpage);
475
476	bh = head;
477	do {
478		set_bh_page(bh, newpage, bh_offset(bh));
479		bh = bh->b_this_page;
480
481	} while (bh != head);
482
483	SetPagePrivate(newpage);
484
485	migrate_page_copy(newpage, page);
486
487	bh = head;
488	do {
489		unlock_buffer(bh);
490 		put_bh(bh);
491		bh = bh->b_this_page;
492
493	} while (bh != head);
494
495	return 0;
496}
497EXPORT_SYMBOL(buffer_migrate_page);
498#endif
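
/*
 * Editor's sketch (the examplefs_* names are hypothetical): a filesystem
 * opts in to page migration by pointing its ->migratepage address_space
 * operation at buffer_migrate_page (for mappings backed by buffer heads)
 * or at migrate_page (for mappings without fs-private state):
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.readpage	= examplefs_readpage,
 *		.writepage	= examplefs_writepage,
 *		.migratepage	= buffer_migrate_page,
 *	};
 */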
499
500/*
501 * Write back a page to clear its dirty state.
502 */
503static int writeout(struct address_space *mapping, struct page *page)
504{
505	struct writeback_control wbc = {
506		.sync_mode = WB_SYNC_NONE,
507		.nr_to_write = 1,
508		.range_start = 0,
509		.range_end = LLONG_MAX,
510		.nonblocking = 1,
511		.for_reclaim = 1
512	};
513	int rc;
514
515	if (!mapping->a_ops->writepage)
516		/* No write method for the address space */
517		return -EINVAL;
518
519	if (!clear_page_dirty_for_io(page))
520		/* Someone else already triggered a write */
521		return -EAGAIN;
522
523	/*
524	 * A dirty page may imply that the underlying filesystem has
525	 * the page on some queue. So the page must be clean for
526	 * migration. Writeout may mean we lose the lock and the
527	 * page state is no longer what we checked for earlier.
528	 * At this point we know that the migration attempt cannot
529	 * be successful.
530	 */
531	remove_migration_ptes(page, page);
532
533	rc = mapping->a_ops->writepage(page, &wbc);
534	if (rc < 0)
535		/* I/O Error writing */
536		return -EIO;
537
538	if (rc != AOP_WRITEPAGE_ACTIVATE)
539		/* unlocked. Relock */
540		lock_page(page);
541
542	return -EAGAIN;
543}
544
545/*
546 * Default handling if a filesystem does not provide a migration function.
547 */
548static int fallback_migrate_page(struct address_space *mapping,
549	struct page *newpage, struct page *page)
550{
551	if (PageDirty(page))
552		return writeout(mapping, page);
553
554	/*
555	 * Buffers may be managed in a filesystem-specific way.
556	 * We must have no buffers or drop them.
557	 */
558	if (PagePrivate(page) &&
559	    !try_to_release_page(page, GFP_KERNEL))
560		return -EAGAIN;
561
562	return migrate_page(mapping, newpage, page);
563}
564
565/*
566 * Move a page to a newly allocated page.
567 * The page is locked and all ptes have been successfully removed.
568 *
569 * The new page will have replaced the old page if this function
570 * is successful.
571 */
572static int move_to_new_page(struct page *newpage, struct page *page)
573{
574	struct address_space *mapping;
575	int rc;
576
577	/*
578	 * Block others from accessing the page when we get around to
579	 * establishing additional references. We are the only one
580	 * holding a reference to the new page at this point.
581	 */
582	if (TestSetPageLocked(newpage))
583		BUG();
584
585	/* Prepare mapping for the new page. */
586	newpage->index = page->index;
587	newpage->mapping = page->mapping;
588
589	mapping = page_mapping(page);
590	if (!mapping)
591		rc = migrate_page(mapping, newpage, page);
592	else if (mapping->a_ops->migratepage)
593		/*
594		 * Most pages have a mapping and most filesystems
595		 * should provide a migration function. Anonymous
596		 * pages are part of swap space which also has its
597		 * own migration function. This is the most common
598		 * path for page migration.
599		 */
600		rc = mapping->a_ops->migratepage(mapping,
601						newpage, page);
602	else
603		rc = fallback_migrate_page(mapping, newpage, page);
604
605	if (!rc) {
606		mem_cgroup_page_migration(page, newpage);
607		remove_migration_ptes(page, newpage);
608	} else
609		newpage->mapping = NULL;
610
611	unlock_page(newpage);
612
613	return rc;
614}
615
616/*
617 * Obtain the lock on page, remove all ptes and migrate the page
618 * to the newly allocated page in newpage.
619 */
620static int unmap_and_move(new_page_t get_new_page, unsigned long private,
621			struct page *page, int force)
622{
623	int rc = 0;
624	int *result = NULL;
625	struct page *newpage = get_new_page(page, private, &result);
626	int rcu_locked = 0;
627	int charge = 0;
628
629	if (!newpage)
630		return -ENOMEM;
631
632	if (page_count(page) == 1)
633		/* page was freed from under us. So we are done. */
634		goto move_newpage;
635
636	rc = -EAGAIN;
637	if (TestSetPageLocked(page)) {
638		if (!force)
639			goto move_newpage;
640		lock_page(page);
641	}
642
643	if (PageWriteback(page)) {
644		if (!force)
645			goto unlock;
646		wait_on_page_writeback(page);
647	}
648	/*
649	 * try_to_unmap() will drop page->mapcount to 0 here, so we could miss
650	 * the anon_vma being freed while we migrate the page. This
651	 * rcu_read_lock() delays freeing of the anon_vma pointer until the end
652	 * of migration. File cache pages are not a problem because they are
653	 * protected by the page lock (file caches may use writepage() or
654	 * lock_page() during migration), so only anonymous pages need this care.
655	 */
656	if (PageAnon(page)) {
657		rcu_read_lock();
658		rcu_locked = 1;
659	}
660
661	/*
662	 * Corner case handling:
663	 * 1. When a new swap-cache page is read in, it is added to the LRU
664	 * and treated as swapcache but it has no rmap yet.
665	 * Calling try_to_unmap() against a page->mapping==NULL page will
666	 * trigger a BUG.  So handle it here.
667	 * 2. An orphaned page (see truncate_complete_page) might have
668	 * fs-private metadata. The page can be picked up due to memory
669	 * offlining.  Everywhere else except page reclaim, the page is
670	 * invisible to the VM, so the page cannot be migrated.  So try to
671	 * free the metadata, so the page can be freed.
672	 */
673	if (!page->mapping) {
674		if (!PageAnon(page) && PagePrivate(page)) {
675			/*
676			 * Go direct to try_to_free_buffers() here because
677			 * a) that's what try_to_release_page() would do anyway
678			 * b) we may be under rcu_read_lock() here, so we can't
679			 *    use GFP_KERNEL which is what try_to_release_page()
680			 *    needs to be effective.
681			 */
682			try_to_free_buffers(page);
683		}
684		goto rcu_unlock;
685	}
686
687	charge = mem_cgroup_prepare_migration(page);
688	/* Establish migration ptes or remove ptes */
689	try_to_unmap(page, 1);
690
691	if (!page_mapped(page))
692		rc = move_to_new_page(newpage, page);
693
694	if (rc) {
695		remove_migration_ptes(page, page);
696		if (charge)
697			mem_cgroup_end_migration(page);
698	} else if (charge)
699 		mem_cgroup_end_migration(newpage);
700rcu_unlock:
701	if (rcu_locked)
702		rcu_read_unlock();
703
704unlock:
705
706	unlock_page(page);
707
708	if (rc != -EAGAIN) {
709 		/*
710 		 * A page that has been migrated has all references
711 		 * removed and will be freed. A page that has not been
712 		 * migrated will have kept its references and be
713 		 * restored.
714 		 */
715 		list_del(&page->lru);
716 		move_to_lru(page);
717	}
718
719move_newpage:
720	/*
721	 * Move the new page to the LRU. If migration was not successful
722	 * then this will free the page.
723	 */
724	move_to_lru(newpage);
725	if (result) {
726		if (rc)
727			*result = rc;
728		else
729			*result = page_to_nid(newpage);
730	}
731	return rc;
732}
733
734/*
735 * migrate_pages
736 *
737 * The function takes one list of pages to migrate and a callback
738 * that, given a page to be migrated and the private data, determines
739 * the target of the move and allocates the new page.
740 *
741 * The function returns after 10 attempts or if no pages
742 * are movable anymore because the list has become empty
743 * or no retryable pages exist anymore. All pages will be
744 * returned to the LRU or freed.
745 *
746 * Return: Number of pages not migrated or error code.
747 */
748int migrate_pages(struct list_head *from,
749		new_page_t get_new_page, unsigned long private)
750{
751	int retry = 1;
752	int nr_failed = 0;
753	int pass = 0;
754	struct page *page;
755	struct page *page2;
756	int swapwrite = current->flags & PF_SWAPWRITE;
757	int rc;
758
759	if (!swapwrite)
760		current->flags |= PF_SWAPWRITE;
761
762	for(pass = 0; pass < 10 && retry; pass++) {
763		retry = 0;
764
765		list_for_each_entry_safe(page, page2, from, lru) {
766			cond_resched();
767
768			rc = unmap_and_move(get_new_page, private,
769						page, pass > 2);
770
771			switch(rc) {
772			case -ENOMEM:
773				goto out;
774			case -EAGAIN:
775				retry++;
776				break;
777			case 0:
778				break;
779			default:
780				/* Permanent failure */
781				nr_failed++;
782				break;
783			}
784		}
785	}
786	rc = 0;
787out:
788	if (!swapwrite)
789		current->flags &= ~PF_SWAPWRITE;
790
791	putback_lru_pages(from);
792
793	if (rc)
794		return rc;
795
796	return nr_failed + retry;
797}
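
/*
 * Editor's sketch (alloc_page_on_node() and nid are hypothetical): a
 * minimal get_new_page callback that interprets the private argument as
 * the target node id, in the style of new_page_node() below:
 *
 *	static struct page *alloc_page_on_node(struct page *page,
 *					unsigned long private, int **result)
 *	{
 *		return alloc_pages_node((int)private,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	nr_failed = migrate_pages(&pagelist, alloc_page_on_node, nid);
 */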
798
799#ifdef CONFIG_NUMA
800/*
801 * Move a list of individual pages
802 */
803struct page_to_node {
804	unsigned long addr;
805	struct page *page;
806	int node;
807	int status;
808};
809
810static struct page *new_page_node(struct page *p, unsigned long private,
811		int **result)
812{
813	struct page_to_node *pm = (struct page_to_node *)private;
814
815	while (pm->node != MAX_NUMNODES && pm->page != p)
816		pm++;
817
818	if (pm->node == MAX_NUMNODES)
819		return NULL;
820
821	*result = &pm->status;
822
823	return alloc_pages_node(pm->node,
824				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
825}
826
827/*
828 * Move a set of pages as indicated in the pm array. The addr
829 * field must be set to the virtual address of the page to be moved
830 * and the node field must contain a valid target node.
831 */
832static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
833				int migrate_all)
834{
835	int err;
836	struct page_to_node *pp;
837	LIST_HEAD(pagelist);
838
839	down_read(&mm->mmap_sem);
840
841	/*
842	 * Build a list of pages to migrate
843	 */
844	migrate_prep();
845	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
846		struct vm_area_struct *vma;
847		struct page *page;
848
849		/*
850		 * A valid page pointer that will not match any of the
851		 * pages that will be moved.
852		 */
853		pp->page = ZERO_PAGE(0);
854
855		err = -EFAULT;
856		vma = find_vma(mm, pp->addr);
857		if (!vma || !vma_migratable(vma))
858			goto set_status;
859
860		page = follow_page(vma, pp->addr, FOLL_GET);
861		err = -ENOENT;
862		if (!page)
863			goto set_status;
864
865		if (PageReserved(page))		/* Check for zero page */
866			goto put_and_set;
867
868		pp->page = page;
869		err = page_to_nid(page);
870
871		if (err == pp->node)
872			/*
873			 * Node already in the right place
874			 */
875			goto put_and_set;
876
877		err = -EACCES;
878		if (page_mapcount(page) > 1 &&
879				!migrate_all)
880			goto put_and_set;
881
882		err = isolate_lru_page(page, &pagelist);
883put_and_set:
884		/*
885		 * Either remove the duplicate refcount from
886		 * isolate_lru_page() or drop the page ref if it was
887		 * not isolated.
888		 */
889		put_page(page);
890set_status:
891		pp->status = err;
892	}
893
894	if (!list_empty(&pagelist))
895		err = migrate_pages(&pagelist, new_page_node,
896				(unsigned long)pm);
897	else
898		err = -ENOENT;
899
900	up_read(&mm->mmap_sem);
901	return err;
902}
903
904/*
905 * Determine the nodes of a list of pages. The addr field in the pm array
906 * must have been set to the virtual address of the page whose node number
907 * we want to determine.
908 */
909static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
910{
911	down_read(&mm->mmap_sem);
912
913	for ( ; pm->node != MAX_NUMNODES; pm++) {
914		struct vm_area_struct *vma;
915		struct page *page;
916		int err;
917
918		err = -EFAULT;
919		vma = find_vma(mm, pm->addr);
920		if (!vma)
921			goto set_status;
922
923		page = follow_page(vma, pm->addr, 0);
924		err = -ENOENT;
925		/* Use PageReserved to check for zero page */
926		if (!page || PageReserved(page))
927			goto set_status;
928
929		err = page_to_nid(page);
930set_status:
931		pm->status = err;
932	}
933
934	up_read(&mm->mmap_sem);
935	return 0;
936}
937
938/*
939 * Move a list of pages in the address space of the currently executing
940 * process.
941 */
942asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
943			const void __user * __user *pages,
944			const int __user *nodes,
945			int __user *status, int flags)
946{
947	int err = 0;
948	int i;
949	struct task_struct *task;
950	nodemask_t task_nodes;
951	struct mm_struct *mm;
952	struct page_to_node *pm = NULL;
953
954	/* Check flags */
955	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
956		return -EINVAL;
957
958	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
959		return -EPERM;
960
961	/* Find the mm_struct */
962	read_lock(&tasklist_lock);
963	task = pid ? find_task_by_vpid(pid) : current;
964	if (!task) {
965		read_unlock(&tasklist_lock);
966		return -ESRCH;
967	}
968	mm = get_task_mm(task);
969	read_unlock(&tasklist_lock);
970
971	if (!mm)
972		return -EINVAL;
973
974	/*
975	 * Check if this process has the right to modify the specified
976	 * process. The right exists if the process has administrative
977	 * capabilities, superuser privileges or the same
978	 * userid as the target process.
979	 */
980	if ((current->euid != task->suid) && (current->euid != task->uid) &&
981	    (current->uid != task->suid) && (current->uid != task->uid) &&
982	    !capable(CAP_SYS_NICE)) {
983		err = -EPERM;
984		goto out2;
985	}
986
987 	err = security_task_movememory(task);
988 	if (err)
989 		goto out2;
990
991
992	task_nodes = cpuset_mems_allowed(task);
993
994	/* Limit nr_pages so that the multiplication cannot overflow */
995	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
996		err = -E2BIG;
997		goto out2;
998	}
999
1000	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
1001	if (!pm) {
1002		err = -ENOMEM;
1003		goto out2;
1004	}
1005
1006	/*
1007	 * Get parameters from user space and initialize the pm
1008	 * array. Return various errors if the user did something wrong.
1009	 */
1010	for (i = 0; i < nr_pages; i++) {
1011		const void __user *p;
1012
1013		err = -EFAULT;
1014		if (get_user(p, pages + i))
1015			goto out;
1016
1017		pm[i].addr = (unsigned long)p;
1018		if (nodes) {
1019			int node;
1020
1021			if (get_user(node, nodes + i))
1022				goto out;
1023
1024			err = -ENODEV;
1025			if (!node_state(node, N_HIGH_MEMORY))
1026				goto out;
1027
1028			err = -EACCES;
1029			if (!node_isset(node, task_nodes))
1030				goto out;
1031
1032			pm[i].node = node;
1033		} else
1034			pm[i].node = 0;	/* anything to not match MAX_NUMNODES */
1035	}
1036	/* End marker */
1037	pm[nr_pages].node = MAX_NUMNODES;
1038
1039	if (nodes)
1040		err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
1041	else
1042		err = do_pages_stat(mm, pm);
1043
1044	if (err >= 0)
1045		/* Return status information */
1046		for (i = 0; i < nr_pages; i++)
1047			if (put_user(pm[i].status, status + i))
1048				err = -EFAULT;
1049
1050out:
1051	vfree(pm);
1052out2:
1053	mmput(mm);
1054	return err;
1055}
1056#endif
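
/*
 * Editor's sketch (userspace, not kernel code): sys_move_pages is exposed
 * to applications as move_pages(2), e.g. through the libnuma <numaif.h>
 * wrapper. A process moving one of its own pages to node 1 might do:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		printf("page now on node %d\n", status[0]);
 */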
1057
1058/*
1059 * Call the migration functions in the vma_ops that may prepare
1060 * memory in a VM for migration. Such migration functions may perform
1061 * the migration for vmas that do not have an underlying page struct.
1062 */
1063int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
1064	const nodemask_t *from, unsigned long flags)
1065{
1066 	struct vm_area_struct *vma;
1067 	int err = 0;
1068
1069 	for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
1070 		if (vma->vm_ops && vma->vm_ops->migrate) {
1071 			err = vma->vm_ops->migrate(vma, to, from, flags);
1072 			if (err)
1073 				break;
1074 		}
1075 	}
1076 	return err;
1077}
1078