mempolicy.c revision 06808b0827e1cd14eedc96bac2655d5b37ac246c
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Four policies are supported, per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA-based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly
 *                restrict the allocation to those memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA-aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for
 * memory allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
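
/*
 * Illustrative userspace sketch (not part of this file, names per the
 * set_mempolicy(2)/numaif.h ABI): request interleaving over nodes 0
 * and 1 for all future allocations of the calling process.  The
 * nodemask is a bitmap with one bit per node:
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 */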

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	k = policy_zone;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}
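
/*
 * Example (illustrative): with policy_zone == ZONE_NORMAL, a nodemask
 * containing only a memoryless node has no populated zone at or below
 * policy_zone, so the check above fails and mpol_new_bind() below
 * rejects such a mask with -EINVAL.
 */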

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
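
/*
 * Worked example (illustrative): with *orig = {0,1} and *rel = {4,5,6},
 * nodes_fold() folds *orig into nodes_weight(*rel) == 3 bits (still
 * {0,1}) and nodes_onto() maps bit i of that onto the i-th set bit of
 * *rel, so *ret = {4,5}.  This is how MPOL_F_RELATIVE_NODES masks are
 * mapped onto the cpuset-allowed nodes.
 */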

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some basic checks and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
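
/*
 * Sketch of the two-step construction used throughout this file
 * (illustrative only, error handling elided); compare do_set_mempolicy():
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *	if (!IS_ERR(new)) {
 *		task_lock(current);
 *		mpol_set_nodemask(new, nodes, scratch);
 *		task_unlock(current);
 *	}
 *	NODEMASK_SCRATCH_FREE(scratch);
 */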

/* Slow path of an mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
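
/*
 * Example (illustrative): an MPOL_INTERLEAVE policy over {0,1} created
 * while the cpuset allowed {0,1}.  If the cpuset moves to {2,3}, the
 * default case above remaps v.nodes with nodes_remap() to {2,3}; with
 * MPOL_F_STATIC_NODES the user's mask is instead intersected with the
 * new set, which here yields the empty mask {}.
 */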

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g. the
		 * location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per-node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		task_lock(current);
		get_policy_nodemask(pol, nmask);
		task_unlock(current);
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fall back to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
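
/*
 * Worked example (illustrative): from = {0,1}, to = {1,2}.  Scanning
 * tmp = {0,1}: s=0 maps to d=1, which is still set in tmp, so <0,1> is
 * remembered; s=1 maps to d=2, which is not in tmp, so we break with
 * <1,2>.  Node 1 is drained into the empty node 2 first, and node 0
 * into the now-vacated node 1 on the next pass.
 */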

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_lru_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the unsupported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
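
/*
 * Example (illustrative): a caller passing maxnode == 17 describes a
 * 16-bit mask after the --maxnode above, so nlongs == 1 and
 * endmask == (1UL << 16) - 1; any user-supplied bits at position 16
 * or above are cleared by the final '&= endmask'.
 */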

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}
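
/*
 * Illustrative userspace sketch (not part of this file): bind an
 * existing mapping to node 0 and migrate its current pages there:
 *
 *	unsigned long nodemask = 1UL << 0;
 *	if (mbind(addr, length, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, MPOL_MF_MOVE))
 *		perror("mbind");
 */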

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}

/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma  - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
									addr);
			if (vpol)
				pol = vpol;
		} else if (vma->vm_policy)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
{
	int nd = numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node is not part of the mask, we use the zonelist
		 * for the first node in the mask instead.
		 */
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	case MPOL_INTERLEAVE: /* should not happen */
		break;
	default:
		BUG();
	}
	return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected from freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change its policy.  The system default policy requires no
 * such protection.
 */
unsigned slab_node(struct mempolicy *policy)
{
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone->node;
	}

	default:
		BUG();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}
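
/*
 * Worked example (illustrative): pol->v.nodes = {0,3,5} and off = 7.
 * nnodes == 3 and target == 7 % 3 == 1, so the loop walks past the
 * 0th set bit (node 0) to the 1st (node 3) and returns nid == 3.
 */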

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct zonelist *zl;

	*mpol = get_vma_policy(current, vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
	} else {
		zl = policy_zonelist(gfp_flags, *mpol);
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return zl;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;
		break;

	default:
		BUG();
	}

	return true;
}
#endif

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * 	alloc_page_vma	- Allocate a page for a VMA.
 *
 * 	@gfp:
 *      %GFP_USER    user allocation.
 *      %GFP_KERNEL  kernel allocations,
 *      %GFP_HIGHMEM highmem/user allocations,
 *      %GFP_FS      allocation should not call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *
 * 	@vma:  Pointer to VMA or NULL if not available.
 *	@addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * 	This function allocates a page from the kernel page pool and applies
 *	a NUMA policy associated with the VMA or the current process.
 *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
 *	mm_struct of the VMA to prevent it from going away. Should be used for
 *	all allocations for pages that will be mapped into
 * 	user space. Returns NULL when no page can be allocated.
 *
 *	Should be called with the mmap_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		mpol_cond_put(pol);
		return alloc_page_interleave(gfp, 0, nid);
	}
	zl = policy_zonelist(gfp, pol);
	if (unlikely(mpol_needs_cond_ref(pol))) {
		/*
		 * slow path: ref counted shared policy
		 */
		struct page *page = __alloc_pages_nodemask(gfp, 0,
						zl, policy_nodemask(gfp, pol));
		__mpol_put(pol);
		return page;
	}
	/*
	 * fast path: default or task policy
	 */
	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
}

/**
 * 	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *		%GFP_USER   user allocation,
 *      	%GFP_KERNEL kernel allocation,
 *      	%GFP_HIGHMEM highmem allocation,
 *      	%GFP_FS     don't call back into a file system.
 *      	%GFP_ATOMIC don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool.  When not in
 *	interrupt context, apply the current process' NUMA policy.
 *	Returns NULL when no page can be allocated.
 *
 *	Don't call cpuset_update_task_memory_state() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages_nodemask(gfp, order,
			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);
	return new;
}

/*
 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
 * eliminate the MPOL_F_* flags that require conditional ref and
 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
 * after return.  Use the returned value.
 *
 * Allows use of a mempolicy for, e.g., multiple allocations with a single
 * policy lookup, even if the policy needs/has extra ref on lookup.
 * shmem_readahead needs this.
 */
struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
{
	if (!mpol_needs_cond_ref(frompol))
		return frompol;

	*tompol = *frompol;
	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
	__mpol_put(frompol);
	return tompol;
}

static int mpol_match_intent(const struct mempolicy *a,
			     const struct mempolicy *b)
{
	if (a->flags != b->flags)
		return 0;
	if (!mpol_store_user_nodemask(a))
		return 1;
	return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->mode != b->mode)
		return 0;
	if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
		return 0;
	switch (a->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node &&
			a->flags == b->flags;
	default:
		BUG();
		return 0;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
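
/*
 * Example (illustrative): with ranges [0,4), [4,8) and [8,12) in the
 * tree, sp_lookup(sp, 3, 9) may first hit [4,8) on the way down; the
 * backward walk then steps to [0,4), whose end (4) still exceeds
 * start (3), and returns that leftmost intersecting node.
 */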

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	pol->flags |= MPOL_F_SHARED;	/* for unref */
	n->policy = pol;
	return n;
}
1908
1909/* Replace a policy range. */
1910static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1911				 unsigned long end, struct sp_node *new)
1912{
1913	struct sp_node *n, *new2 = NULL;
1914
1915restart:
1916	spin_lock(&sp->lock);
1917	n = sp_lookup(sp, start, end);
1918	/* Take care of old policies in the same range. */
1919	while (n && n->start < end) {
1920		struct rb_node *next = rb_next(&n->nd);
1921		if (n->start >= start) {
1922			if (n->end <= end)
1923				sp_delete(sp, n);
1924			else
1925				n->start = end;
1926		} else {
1927			/* Old policy spanning whole new range. */
1928			if (n->end > end) {
1929				if (!new2) {
1930					spin_unlock(&sp->lock);
1931					new2 = sp_alloc(end, n->end, n->policy);
1932					if (!new2)
1933						return -ENOMEM;
1934					goto restart;
1935				}
1936				n->end = start;
1937				sp_insert(sp, new2);
1938				new2 = NULL;
1939				break;
1940			} else
1941				n->end = start;
1942		}
1943		if (!next)
1944			break;
1945		n = rb_entry(next, struct sp_node, nd);
1946	}
1947	if (new)
1948		sp_insert(sp, new);
1949	spin_unlock(&sp->lock);
1950	if (new2) {
1951		mpol_put(new2->policy);
1952		kmem_cache_free(sn_cache, new2);
1953	}
1954	return 0;
1955}
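
/*
 * Worked example for the function above (editor's note): replacing
 * [3, 7) in a tree holding a single entry [0, 10) hits the "old
 * policy spanning whole new range" branch: the lock is dropped to
 * allocate new2 = [7, 10) with the old policy, the walk restarts,
 * the old entry is trimmed to [0, 3), new2 is inserted, and the
 * caller's node covers [3, 7) -- leaving the old policy on [0, 3)
 * and [7, 10) with the new one in between.
 */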
1956
1957/**
1958 * mpol_shared_policy_init - initialize shared policy for inode
1959 * @sp: pointer to inode shared policy
1960 * @mpol:  struct mempolicy to install
1961 *
1962 * Install non-NULL @mpol in inode's shared policy rb-tree.
1963 * On entry, the current task has a reference on a non-NULL @mpol.
1964 * That reference is dropped here on every return path.
1965 * This is called during get_inode(), so we can use GFP_KERNEL.
1966 */
1967void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1968{
1969	int ret;
1970
1971	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
1972	spin_lock_init(&sp->lock);
1973
1974	if (mpol) {
1975		struct vm_area_struct pvma;
1976		struct mempolicy *new;
1977		NODEMASK_SCRATCH(scratch);
1978
1979		if (!scratch)
1980			goto put_mpol;
1981		/* contextualize the tmpfs mount point mempolicy */
1982		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
1983		if (IS_ERR(new)) {
1984			NODEMASK_SCRATCH_FREE(scratch);
1985			goto put_mpol;	/* no valid nodemask intersection */
1986		}
1987
1988		task_lock(current);
1989		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
1990		task_unlock(current);
1991		if (ret) {
1992			NODEMASK_SCRATCH_FREE(scratch);
1993			mpol_put(new);
1994			goto put_mpol;
1995		}
1996
1997		/* Create pseudo-vma that contains just the policy */
1998		memset(&pvma, 0, sizeof(struct vm_area_struct));
1999		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2000		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2001		mpol_put(new);			/* drop initial ref */
2002		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
2004		mpol_put(mpol);	/* drop our ref on sb mpol, on all paths */
2005	}
2006}
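
/*
 * Example (editor's note): the tmpfs caller in mm/shmem.c reaches this
 * via something like
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * where the superblock mempolicy originates from a mount option such
 * as "mount -t tmpfs -o mpol=interleave:0-3 tmpfs /dev/shm"; the
 * pseudo-vma spanning [0, TASK_SIZE) installs that policy for every
 * page index in the file.
 */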
2007
2008int mpol_set_shared_policy(struct shared_policy *info,
2009			struct vm_area_struct *vma, struct mempolicy *npol)
2010{
2011	int err;
2012	struct sp_node *new = NULL;
2013	unsigned long sz = vma_pages(vma);
2014
2015	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2016		 vma->vm_pgoff,
2017		 sz, npol ? npol->mode : -1,
2018		 npol ? npol->flags : -1,
2019		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2020
2021	if (npol) {
2022		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2023		if (!new)
2024			return -ENOMEM;
2025	}
2026	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2027	if (err && new)
2028		kmem_cache_free(sn_cache, new);
2029	return err;
2030}
2031
2032/* Free a backing policy store on inode delete. */
2033void mpol_free_shared_policy(struct shared_policy *p)
2034{
2035	struct sp_node *n;
2036	struct rb_node *next;
2037
2038	if (!p->root.rb_node)
2039		return;
2040	spin_lock(&p->lock);
2041	next = rb_first(&p->root);
2042	while (next) {
2043		n = rb_entry(next, struct sp_node, nd);
2044		next = rb_next(&n->nd);
2045		rb_erase(&n->nd, &p->root);
2046		mpol_put(n->policy);
2047		kmem_cache_free(sn_cache, n);
2048	}
2049	spin_unlock(&p->lock);
2050}
2051
2052/* assumes fs == KERNEL_DS */
2053void __init numa_policy_init(void)
2054{
2055	nodemask_t interleave_nodes;
2056	unsigned long largest = 0;
2057	int nid, prefer = 0;
2058
2059	policy_cache = kmem_cache_create("numa_policy",
2060					 sizeof(struct mempolicy),
2061					 0, SLAB_PANIC, NULL);
2062
2063	sn_cache = kmem_cache_create("shared_policy_node",
2064				     sizeof(struct sp_node),
2065				     0, SLAB_PANIC, NULL);
2066
2067	/*
2068	 * Set interleaving policy for system init. Interleaving is only
2069	 * enabled across nodes with at least 16MB of present memory;
2070	 * if every node is smaller, fall back to the largest node alone.
2071	 */
2072	nodes_clear(interleave_nodes);
2073	for_each_node_state(nid, N_HIGH_MEMORY) {
2074		unsigned long total_pages = node_present_pages(nid);
2075
2076		/* Preserve the largest node */
2077		if (largest < total_pages) {
2078			largest = total_pages;
2079			prefer = nid;
2080		}
2081
2082		/* Interleave this node? */
2083		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2084			node_set(nid, interleave_nodes);
2085	}
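
	/*
	 * Editor's note: the size test above compares bytes, i.e.
	 * node_present_pages() << PAGE_SHIFT against 16MB; with 4KB
	 * pages a node needs at least 4096 present pages to join the
	 * interleave set.
	 */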
2086
2087	/* All too small, use the largest */
2088	if (unlikely(nodes_empty(interleave_nodes)))
2089		node_set(prefer, interleave_nodes);
2090
2091	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2092		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
2093}
2094
2095/* Reset policy of current process to default */
2096void numa_default_policy(void)
2097{
2098	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2099}
2100
2101/*
2102 * Parse and format mempolicy from/to strings
2103 */
2104
2105/*
2106 * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
2107 * Used only for mpol_parse_str() and mpol_to_str()
2108 */
2109#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
2110static const char * const policy_types[] =
2111	{ "default", "prefer", "bind", "interleave", "local" };
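/*
 * Editor's note: policy_types[] is indexed by mode, so the first four
 * strings must stay in MPOL_DEFAULT, MPOL_PREFERRED, MPOL_BIND,
 * MPOL_INTERLEAVE order, with the "local" pseudo-policy appended at
 * index MPOL_LOCAL.
 */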
2112
2113
2114#ifdef CONFIG_TMPFS
2115/**
2116 * mpol_parse_str - parse string to mempolicy
2117 * @str:  string containing mempolicy to parse
2118 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2119 * @no_context:  flag whether to "contextualize" the mempolicy
2120 *
2121 * Format of input:
2122 *	<mode>[=<flags>][:<nodelist>]
2123 *
2124 * if @no_context is true, save the input nodemask in w.user_nodemask in
2125 * the returned mempolicy.  This will be used to "clone" the mempolicy in
2126 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
2127 * mount option.  Note that if 'static' or 'relative' mode flags were
2128 * specified, the input nodemask will already have been saved.  Saving
2129 * it again is redundant, but safe.
2130 *
2131 * Returns 0 on success, 1 on failure.
2132 */
2133int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2134{
2135	struct mempolicy *new = NULL;
2136	unsigned short uninitialized_var(mode);
2137	unsigned short uninitialized_var(mode_flags);
2138	nodemask_t nodes;
2139	char *nodelist = strchr(str, ':');
2140	char *flags = strchr(str, '=');
2141	int i;
2142	int err = 1;
2143
2144	if (nodelist) {
2145		/* NUL-terminate mode or flags string */
2146		*nodelist++ = '\0';
2147		if (nodelist_parse(nodelist, nodes))
2148			goto out;
2149		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2150			goto out;
2151	} else
2152		nodes_clear(nodes);
2153
2154	if (flags)
2155		*flags++ = '\0';	/* terminate mode string */
2156
2157	for (i = 0; i <= MPOL_LOCAL; i++) {
2158		if (!strcmp(str, policy_types[i])) {
2159			mode = i;
2160			break;
2161		}
2162	}
2163	if (i > MPOL_LOCAL)
2164		goto out;
2165
2166	switch (mode) {
2167	case MPOL_PREFERRED:
2168		/*
2169		 * Insist on a nodelist of one node only
2170		 */
2171		if (nodelist) {
2172			char *rest = nodelist;
2173			while (isdigit(*rest))
2174				rest++;
2175			if (!*rest)
2176				err = 0;
2177		}
2178		break;
2179	case MPOL_INTERLEAVE:
2180		/*
2181		 * Default to online nodes with memory if no nodelist
2182		 */
2183		if (!nodelist)
2184			nodes = node_states[N_HIGH_MEMORY];
2185		err = 0;
2186		break;
2187	case MPOL_LOCAL:
2188		/*
2189		 * Don't allow a nodelist;  mpol_new() checks flags
2190		 */
2191		if (nodelist)
2192			goto out;
2193		mode = MPOL_PREFERRED;
2194		err = 0;
2195		break;
2196	case MPOL_BIND:		/* mpol_new() enforces non-empty nodemask */
2197	case MPOL_DEFAULT:	/* mpol_new() enforces empty nodemask, ignores flags */
2198		err = 0;
2199		break;
2200	}
2201
2202	mode_flags = 0;
2203	if (flags) {
2204		/*
2205		 * Currently, we only support two mutually exclusive
2206		 * mode flags.
2207		 */
2208		if (!strcmp(flags, "static"))
2209			mode_flags |= MPOL_F_STATIC_NODES;
2210		else if (!strcmp(flags, "relative"))
2211			mode_flags |= MPOL_F_RELATIVE_NODES;
2212		else
2213			err = 1;
2214	}
2215
2216	new = mpol_new(mode, mode_flags, &nodes);
2217	if (IS_ERR(new))
2218		err = 1;
2219	else {
2220		int ret;
2221		NODEMASK_SCRATCH(scratch);
2222		if (scratch) {
2223			task_lock(current);
2224			ret = mpol_set_nodemask(new, &nodes, scratch);
2225			task_unlock(current);
2226		} else
2227			ret = -ENOMEM;
2228		NODEMASK_SCRATCH_FREE(scratch);
2229		if (ret) {
2230			err = 1;
2231			mpol_put(new);
2232		} else if (no_context) {
2233			/* save for contextualization */
2234			new->w.user_nodemask = nodes;
2235		}
2236	}
2237out:
2238	if (nodelist)		/* restore string for error message */
2239		*--nodelist = ':';
2240	if (flags)
2241		*--flags = '=';
2242	if (!err)
2243		*mpol = new;
2244	else if (new && !IS_ERR(new))
2245		mpol_put(new);	/* don't leak a policy we won't return */
2246	return err;
2247}
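
/*
 * Illustrative inputs for mpol_parse_str() (editor's note):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:2"	MPOL_PREFERRED | MPOL_F_STATIC_NODES, node 2
 *	"bind:1,3"		MPOL_BIND to nodes 1 and 3
 *	"local"			MPOL_PREFERRED with an empty nodemask
 *
 * A nodelist naming nodes outside N_HIGH_MEMORY is rejected up front
 * by the nodes_subset() check.
 */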
2248#endif /* CONFIG_TMPFS */
2249
2250/**
2251 * mpol_to_str - format a mempolicy structure for printing
2252 * @buffer:  to contain formatted mempolicy string
2253 * @maxlen:  length of @buffer
2254 * @pol:  pointer to mempolicy to be formatted
2255 * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2256 *
2257 * Convert a mempolicy into a string.
2258 * Returns the number of characters in buffer (if positive)
2259 * or an error (negative)
2260 */
2261int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2262{
2263	char *p = buffer;
2264	int l;
2265	nodemask_t nodes;
2266	unsigned short mode;
2267	unsigned short flags = pol ? pol->flags : 0;
2268
2269	/*
2270	 * Sanity check:  room for longest mode, flag and some nodes
2271	 */
2272	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2273
2274	if (!pol || pol == &default_policy)
2275		mode = MPOL_DEFAULT;
2276	else
2277		mode = pol->mode;
2278
2279	switch (mode) {
2280	case MPOL_DEFAULT:
2281		nodes_clear(nodes);
2282		break;
2283
2284	case MPOL_PREFERRED:
2285		nodes_clear(nodes);
2286		if (flags & MPOL_F_LOCAL)
2287			mode = MPOL_LOCAL;	/* pseudo-policy */
2288		else
2289			node_set(pol->v.preferred_node, nodes);
2290		break;
2291
2292	case MPOL_BIND:
2293		/* Fall through */
2294	case MPOL_INTERLEAVE:
2295		if (no_context)
2296			nodes = pol->w.user_nodemask;
2297		else
2298			nodes = pol->v.nodes;
2299		break;
2300
2301	default:
2302		BUG();
2303	}
2304
2305	l = strlen(policy_types[mode]);
2306	if (buffer + maxlen < p + l + 1)
2307		return -ENOSPC;
2308
2309	strcpy(p, policy_types[mode]);
2310	p += l;
2311
2312	if (flags & MPOL_MODE_FLAGS) {
2313		if (buffer + maxlen < p + 2)
2314			return -ENOSPC;
2315		*p++ = '=';
2316
2317		/*
2318		 * Currently, the only defined flags are mutually exclusive
2319		 */
2320		if (flags & MPOL_F_STATIC_NODES)
2321			p += snprintf(p, buffer + maxlen - p, "static");
2322		else if (flags & MPOL_F_RELATIVE_NODES)
2323			p += snprintf(p, buffer + maxlen - p, "relative");
2324	}
2325
2326	if (!nodes_empty(nodes)) {
2327		if (buffer + maxlen < p + 2)
2328			return -ENOSPC;
2329		*p++ = ':';
2330		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2331	}
2332	return p - buffer;
2333}
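
/*
 * Editor's note: the formatter above emits the same
 * <mode>[=<flags>][:<nodelist>] grammar that mpol_parse_str() accepts,
 * e.g. "default", "local", "bind:1,3" or "interleave=static:0-3", so
 * a formatted policy can be parsed back. A typical call:
 *
 *	char buf[64];
 *	mpol_to_str(buf, sizeof(buf), pol, 0);
 */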
2334
2335struct numa_maps {
2336	unsigned long pages;		/* total pages examined */
2337	unsigned long anon;		/* anonymous pages */
2338	unsigned long active;		/* active or unevictable pages */
2339	unsigned long writeback;	/* pages under writeback */
2340	unsigned long mapcount_max;	/* highest mapcount seen */
2341	unsigned long dirty;		/* pte-dirty or PageDirty pages */
2342	unsigned long swapcache;	/* pages in swap cache */
2343	unsigned long node[MAX_NUMNODES];	/* per-node page counts */
2344};
2345
2346static void gather_stats(struct page *page, void *private, int pte_dirty)
2347{
2348	struct numa_maps *md = private;
2349	int count = page_mapcount(page);
2350
2351	md->pages++;
2352	if (pte_dirty || PageDirty(page))
2353		md->dirty++;
2354
2355	if (PageSwapCache(page))
2356		md->swapcache++;
2357
2358	if (PageActive(page) || PageUnevictable(page))
2359		md->active++;
2360
2361	if (PageWriteback(page))
2362		md->writeback++;
2363
2364	if (PageAnon(page))
2365		md->anon++;
2366
2367	if (count > md->mapcount_max)
2368		md->mapcount_max = count;
2369
2370	md->node[page_to_nid(page)]++;
2371}
2372
2373#ifdef CONFIG_HUGETLB_PAGE
2374static void check_huge_range(struct vm_area_struct *vma,
2375		unsigned long start, unsigned long end,
2376		struct numa_maps *md)
2377{
2378	unsigned long addr;
2379	struct page *page;
2380	struct hstate *h = hstate_vma(vma);
2381	unsigned long sz = huge_page_size(h);
2382
2383	for (addr = start; addr < end; addr += sz) {
2384		pte_t *ptep = huge_pte_offset(vma->vm_mm,
2385						addr & huge_page_mask(h));
2386		pte_t pte;
2387
2388		if (!ptep)
2389			continue;
2390
2391		pte = *ptep;
2392		if (pte_none(pte))
2393			continue;
2394
2395		page = pte_page(pte);
2396		if (!page)
2397			continue;
2398
2399		gather_stats(page, md, pte_dirty(pte));
2400	}
2401}
2402#else
2403static inline void check_huge_range(struct vm_area_struct *vma,
2404		unsigned long start, unsigned long end,
2405		struct numa_maps *md)
2406{
2407}
2408#endif
2409
2410/*
2411 * Display pages allocated per node and memory policy via /proc.
2412 */
2413int show_numa_map(struct seq_file *m, void *v)
2414{
2415	struct proc_maps_private *priv = m->private;
2416	struct vm_area_struct *vma = v;
2417	struct numa_maps *md;
2418	struct file *file = vma->vm_file;
2419	struct mm_struct *mm = vma->vm_mm;
2420	struct mempolicy *pol;
2421	int n;
2422	char buffer[50];
2423
2424	if (!mm)
2425		return 0;
2426
2427	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2428	if (!md)
2429		return 0;
2430
2431	pol = get_vma_policy(priv->task, vma, vma->vm_start);
2432	mpol_to_str(buffer, sizeof(buffer), pol, 0);
2433	mpol_cond_put(pol);
2434
2435	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2436
2437	if (file) {
2438		seq_printf(m, " file=");
2439		seq_path(m, &file->f_path, "\n\t= ");
2440	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2441		seq_printf(m, " heap");
2442	} else if (vma->vm_start <= mm->start_stack &&
2443			vma->vm_end >= mm->start_stack) {
2444		seq_printf(m, " stack");
2445	}
2446
2447	if (is_vm_hugetlb_page(vma)) {
2448		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2449		seq_printf(m, " huge");
2450	} else {
2451		check_pgd_range(vma, vma->vm_start, vma->vm_end,
2452			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2453	}
2454
2455	if (!md->pages)
2456		goto out;
2457
2458	if (md->anon)
2459		seq_printf(m, " anon=%lu", md->anon);
2460
2461	if (md->dirty)
2462		seq_printf(m, " dirty=%lu", md->dirty);
2463
2464	if (md->pages != md->anon && md->pages != md->dirty)
2465		seq_printf(m, " mapped=%lu", md->pages);
2466
2467	if (md->mapcount_max > 1)
2468		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2469
2470	if (md->swapcache)
2471		seq_printf(m, " swapcache=%lu", md->swapcache);
2472
2473	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2474		seq_printf(m, " active=%lu", md->active);
2475
2476	if (md->writeback)
2477		seq_printf(m, " writeback=%lu", md->writeback);
2478
2479	for_each_node_state(n, N_HIGH_MEMORY)
2480		if (md->node[n])
2481			seq_printf(m, " N%d=%lu", n, md->node[n]);
2482out:
2483	seq_putc(m, '\n');
2484	kfree(md);
2485
2486	if (m->count < m->size)
2487		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2488	return 0;
2489}
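
/*
 * Example /proc/<pid>/numa_maps line (editor's note, made-up values):
 *
 *	7f1a2b400000 interleave=static:0-3 anon=512 dirty=512 active=448
 *		N0=128 N1=128 N2=128 N3=128
 *
 * (shown wrapped; the kernel emits one line): start address, the
 * policy string from mpol_to_str(), then only the non-zero counters
 * gathered above, and finally per-node page counts.
 */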
2490