hugetlb.c revision 6de2b1aab94355482bd2accdc115666509667458
1/*
2 * Generic hugetlb support.
3 * (C) William Irwin, April 2004
4 */
5#include <linux/list.h>
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/mm.h>
9#include <linux/seq_file.h>
10#include <linux/sysctl.h>
11#include <linux/highmem.h>
12#include <linux/mmu_notifier.h>
13#include <linux/nodemask.h>
14#include <linux/pagemap.h>
15#include <linux/mempolicy.h>
16#include <linux/cpuset.h>
17#include <linux/mutex.h>
18#include <linux/bootmem.h>
19#include <linux/sysfs.h>
20#include <linux/slab.h>
21#include <linux/rmap.h>
22#include <linux/swap.h>
23#include <linux/swapops.h>
24
25#include <asm/page.h>
26#include <asm/pgtable.h>
27#include <asm/io.h>
28
29#include <linux/hugetlb.h>
30#include <linux/node.h>
31#include "internal.h"
32
33const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35unsigned long hugepages_treat_as_movable;
36
37static int max_hstate;
38unsigned int default_hstate_idx;
39struct hstate hstates[HUGE_MAX_HSTATE];
40
41__initdata LIST_HEAD(huge_boot_pages);
42
43/* for command line parsing */
44static struct hstate * __initdata parsed_hstate;
45static unsigned long __initdata default_hstate_max_huge_pages;
46static unsigned long __initdata default_hstate_size;
47
48#define for_each_hstate(h) \
49	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
50
51/*
52 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
53 */
54static DEFINE_SPINLOCK(hugetlb_lock);
55
56/*
57 * Region tracking -- allows tracking of reservations and instantiated pages
58 *                    across the pages in a mapping.
59 *
60 * The region data structures are protected by a combination of the mmap_sem
61 * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
62 * must either hold the mmap_sem for write, or the mmap_sem for read and
63 * the hugetlb_instantiation mutex:
64 *
65 * 	down_write(&mm->mmap_sem);
66 * or
67 * 	down_read(&mm->mmap_sem);
68 * 	mutex_lock(&hugetlb_instantiation_mutex);
69 */
70struct file_region {
71	struct list_head link;
72	long from;
73	long to;
74};
75
76static long region_add(struct list_head *head, long f, long t)
77{
78	struct file_region *rg, *nrg, *trg;
79
80	/* Locate the region we are either in or before. */
81	list_for_each_entry(rg, head, link)
82		if (f <= rg->to)
83			break;
84
85	/* Round our left edge to the current segment if it encloses us. */
86	if (f > rg->from)
87		f = rg->from;
88
89	/* Check for and consume any regions we now overlap with. */
90	nrg = rg;
91	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
92		if (&rg->link == head)
93			break;
94		if (rg->from > t)
95			break;
96
97		/* If this area reaches higher than us then extend our area to
98		 * include it completely.  If this is not the first area
99		 * which we intend to reuse, free it. */
100		if (rg->to > t)
101			t = rg->to;
102		if (rg != nrg) {
103			list_del(&rg->link);
104			kfree(rg);
105		}
106	}
107	nrg->from = f;
108	nrg->to = t;
109	return 0;
110}
111
112static long region_chg(struct list_head *head, long f, long t)
113{
114	struct file_region *rg, *nrg;
115	long chg = 0;
116
117	/* Locate the region we are before or in. */
118	list_for_each_entry(rg, head, link)
119		if (f <= rg->to)
120			break;
121
122	/* If we are below the current region then a new region is required.
123	 * Subtle: allocate a new region at this position but make it zero
124	 * size, so that we are guaranteed to be able to record the reservation. */
125	if (&rg->link == head || t < rg->from) {
126		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
127		if (!nrg)
128			return -ENOMEM;
129		nrg->from = f;
130		nrg->to   = f;
131		INIT_LIST_HEAD(&nrg->link);
132		list_add(&nrg->link, rg->link.prev);
133
134		return t - f;
135	}
136
137	/* Round our left edge to the current segment if it encloses us. */
138	if (f > rg->from)
139		f = rg->from;
140	chg = t - f;
141
142	/* Check for and consume any regions we now overlap with. */
143	list_for_each_entry(rg, rg->link.prev, link) {
144		if (&rg->link == head)
145			break;
146		if (rg->from > t)
147			return chg;
148
149		/* We overlap with this area; if it extends further than
150		 * us then we must extend ourselves.  Account for its
151		 * existing reservation. */
152		if (rg->to > t) {
153			chg += rg->to - t;
154			t = rg->to;
155		}
156		chg -= rg->to - rg->from;
157	}
158	return chg;
159}
160
161static long region_truncate(struct list_head *head, long end)
162{
163	struct file_region *rg, *trg;
164	long chg = 0;
165
166	/* Locate the region we are either in or before. */
167	list_for_each_entry(rg, head, link)
168		if (end <= rg->to)
169			break;
170	if (&rg->link == head)
171		return 0;
172
173	/* If we are in the middle of a region then adjust it. */
174	if (end > rg->from) {
175		chg = rg->to - end;
176		rg->to = end;
177		rg = list_entry(rg->link.next, typeof(*rg), link);
178	}
179
180	/* Drop any remaining regions. */
181	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
182		if (&rg->link == head)
183			break;
184		chg += rg->to - rg->from;
185		list_del(&rg->link);
186		kfree(rg);
187	}
188	return chg;
189}
190
191static long region_count(struct list_head *head, long f, long t)
192{
193	struct file_region *rg;
194	long chg = 0;
195
196	/* Locate each segment we overlap with, and count that overlap. */
197	list_for_each_entry(rg, head, link) {
198		long seg_from;
199		long seg_to;
200
201		if (rg->to <= f)
202			continue;
203		if (rg->from >= t)
204			break;
205
206		seg_from = max(rg->from, f);
207		seg_to = min(rg->to, t);
208
209		chg += seg_to - seg_from;
210	}
211
212	return chg;
213}
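/*
 * For illustration, the region calls above are used in a prepare/commit
 * pattern (mirroring vma_needs_reservation()/vma_commit_reservation()
 * further down; 'regions' and 'idx' are hypothetical here):
 *
 *	chg = region_chg(&regions, idx, idx + 1);
 *	if (chg < 0)
 *		return chg;		(-ENOMEM, nothing was recorded)
 *	... charge quota / allocate the huge page using 'chg' ...
 *	region_add(&regions, idx, idx + 1);
 *
 * region_chg() reports how many pages in [idx, idx + 1) are not yet
 * covered by the map and pre-allocates any list entry it might need;
 * region_add() then records the same range without needing to allocate.
 */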
214
215/*
216 * Convert the address within this vma to the page offset within
217 * the mapping, in pagecache page units; huge pages here.
218 */
219static pgoff_t vma_hugecache_offset(struct hstate *h,
220			struct vm_area_struct *vma, unsigned long address)
221{
222	return ((address - vma->vm_start) >> huge_page_shift(h)) +
223			(vma->vm_pgoff >> huge_page_order(h));
224}
225
226pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
227				     unsigned long address)
228{
229	return vma_hugecache_offset(hstate_vma(vma), vma, address);
230}
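/*
 * Worked example (illustrative numbers, assuming 4kB base pages): with
 * 2MB huge pages (huge_page_shift() == 21, huge_page_order() == 9), a
 * vma with vm_pgoff == 1024 (file offset 4MB) and a fault at
 * vma->vm_start + 6MB gives
 *
 *	(6MB >> 21) + (1024 >> 9) == 3 + 2 == 5
 *
 * i.e. the fault maps the sixth huge page of the backing file.
 */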
231
232/*
233 * Return the size of the pages allocated when backing a VMA. In the majority
234 * of cases this will be the same size as used by the page table entries.
235 */
236unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
237{
238	struct hstate *hstate;
239
240	if (!is_vm_hugetlb_page(vma))
241		return PAGE_SIZE;
242
243	hstate = hstate_vma(vma);
244
245	return 1UL << (hstate->order + PAGE_SHIFT);
246}
247EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
248
249/*
250 * Return the page size being used by the MMU to back a VMA. In the majority
251 * of cases, the page size used by the kernel matches the MMU size. On
252 * architectures where it differs, an architecture-specific version of this
253 * function is required.
254 */
255#ifndef vma_mmu_pagesize
256unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
257{
258	return vma_kernel_pagesize(vma);
259}
260#endif
261
262/*
263 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
264 * bits of the reservation map pointer, which are always clear due to
265 * alignment.
266 */
267#define HPAGE_RESV_OWNER    (1UL << 0)
268#define HPAGE_RESV_UNMAPPED (1UL << 1)
269#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
270
271/*
272 * These helpers are used to track how many pages are reserved for
273 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
274 * is guaranteed to have their future faults succeed.
275 *
276 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
277 * the reserve counters are updated with the hugetlb_lock held. It is safe
278 * to reset the VMA at fork() time as it is not in use yet and there is no
279 * chance of the global counters getting corrupted as a result of the values.
280 *
281 * The private mapping reservation is represented in a subtly different
282 * manner to a shared mapping.  A shared mapping has a region map associated
283 * with the underlying file; this region map represents the backing file
284 * pages which have ever had a reservation assigned, and this persists even
285 * after the page is instantiated.  A private mapping has a region map
286 * associated with the original mmap which is attached to all VMAs which
287 * reference it; this region map represents those offsets which have consumed
288 * reservation, i.e. where pages have been instantiated.
289 */
290static unsigned long get_vma_private_data(struct vm_area_struct *vma)
291{
292	return (unsigned long)vma->vm_private_data;
293}
294
295static void set_vma_private_data(struct vm_area_struct *vma,
296							unsigned long value)
297{
298	vma->vm_private_data = (void *)value;
299}
300
301struct resv_map {
302	struct kref refs;
303	struct list_head regions;
304};
305
306static struct resv_map *resv_map_alloc(void)
307{
308	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
309	if (!resv_map)
310		return NULL;
311
312	kref_init(&resv_map->refs);
313	INIT_LIST_HEAD(&resv_map->regions);
314
315	return resv_map;
316}
317
318static void resv_map_release(struct kref *ref)
319{
320	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
321
322	/* Clear out any active regions before we release the map. */
323	region_truncate(&resv_map->regions, 0);
324	kfree(resv_map);
325}
326
327static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
328{
329	VM_BUG_ON(!is_vm_hugetlb_page(vma));
330	if (!(vma->vm_flags & VM_MAYSHARE))
331		return (struct resv_map *)(get_vma_private_data(vma) &
332							~HPAGE_RESV_MASK);
333	return NULL;
334}
335
336static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
337{
338	VM_BUG_ON(!is_vm_hugetlb_page(vma));
339	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
340
341	set_vma_private_data(vma, (get_vma_private_data(vma) &
342				HPAGE_RESV_MASK) | (unsigned long)map);
343}
344
345static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
346{
347	VM_BUG_ON(!is_vm_hugetlb_page(vma));
348	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
349
350	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
351}
352
353static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
354{
355	VM_BUG_ON(!is_vm_hugetlb_page(vma));
356
357	return (get_vma_private_data(vma) & flag) != 0;
358}
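/*
 * Illustration of the pointer tagging described above (a sketch, not a
 * real code path): a kmalloc()ed resv_map is at least word aligned, so
 * its two low bits are clear and the HPAGE_RESV_* flags can share
 * vm_private_data with the pointer:
 *
 *	struct resv_map *map = resv_map_alloc();
 *
 *	set_vma_resv_map(vma, map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *	vma_resv_map(vma) == map;			(pointer recovered)
 *	is_vma_resv_set(vma, HPAGE_RESV_OWNER) == 1;	(flag still set)
 */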
359
360/* Decrement the reserved pages in the hugepage pool by one */
361static void decrement_hugepage_resv_vma(struct hstate *h,
362			struct vm_area_struct *vma)
363{
364	if (vma->vm_flags & VM_NORESERVE)
365		return;
366
367	if (vma->vm_flags & VM_MAYSHARE) {
368		/* Shared mappings always use reserves */
369		h->resv_huge_pages--;
370	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
371		/*
372		 * Only the process that called mmap() has reserves for
373		 * private mappings.
374		 */
375		h->resv_huge_pages--;
376	}
377}
378
379/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
380void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
381{
382	VM_BUG_ON(!is_vm_hugetlb_page(vma));
383	if (!(vma->vm_flags & VM_MAYSHARE))
384		vma->vm_private_data = (void *)0;
385}
386
387/* Returns true if the VMA has associated reserve pages */
388static int vma_has_reserves(struct vm_area_struct *vma)
389{
390	if (vma->vm_flags & VM_MAYSHARE)
391		return 1;
392	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
393		return 1;
394	return 0;
395}
396
397static void clear_gigantic_page(struct page *page,
398			unsigned long addr, unsigned long sz)
399{
400	int i;
401	struct page *p = page;
402
403	might_sleep();
404	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
405		cond_resched();
406		clear_user_highpage(p, addr + i * PAGE_SIZE);
407	}
408}
409static void clear_huge_page(struct page *page,
410			unsigned long addr, unsigned long sz)
411{
412	int i;
413
414	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
415		clear_gigantic_page(page, addr, sz);
416		return;
417	}
418
419	might_sleep();
420	for (i = 0; i < sz/PAGE_SIZE; i++) {
421		cond_resched();
422		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
423	}
424}
425
426static void copy_user_gigantic_page(struct page *dst, struct page *src,
427			   unsigned long addr, struct vm_area_struct *vma)
428{
429	int i;
430	struct hstate *h = hstate_vma(vma);
431	struct page *dst_base = dst;
432	struct page *src_base = src;
433
434	for (i = 0; i < pages_per_huge_page(h); ) {
435		cond_resched();
436		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
437
438		i++;
439		dst = mem_map_next(dst, dst_base, i);
440		src = mem_map_next(src, src_base, i);
441	}
442}
443
444static void copy_user_huge_page(struct page *dst, struct page *src,
445			   unsigned long addr, struct vm_area_struct *vma)
446{
447	int i;
448	struct hstate *h = hstate_vma(vma);
449
450	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
451		copy_user_gigantic_page(dst, src, addr, vma);
452		return;
453	}
454
455	might_sleep();
456	for (i = 0; i < pages_per_huge_page(h); i++) {
457		cond_resched();
458		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
459	}
460}
461
462static void copy_gigantic_page(struct page *dst, struct page *src)
463{
464	int i;
465	struct hstate *h = page_hstate(src);
466	struct page *dst_base = dst;
467	struct page *src_base = src;
468
469	for (i = 0; i < pages_per_huge_page(h); ) {
470		cond_resched();
471		copy_highpage(dst, src);
472
473		i++;
474		dst = mem_map_next(dst, dst_base, i);
475		src = mem_map_next(src, src_base, i);
476	}
477}
478
479void copy_huge_page(struct page *dst, struct page *src)
480{
481	int i;
482	struct hstate *h = page_hstate(src);
483
484	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
485		copy_gigantic_page(dst, src);
486		return;
487	}
488
489	might_sleep();
490	for (i = 0; i < pages_per_huge_page(h); i++) {
491		cond_resched();
492		copy_highpage(dst + i, src + i);
493	}
494}
495
496static void enqueue_huge_page(struct hstate *h, struct page *page)
497{
498	int nid = page_to_nid(page);
499	list_add(&page->lru, &h->hugepage_freelists[nid]);
500	h->free_huge_pages++;
501	h->free_huge_pages_node[nid]++;
502}
503
504static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
505{
506	struct page *page;
507
508	if (list_empty(&h->hugepage_freelists[nid]))
509		return NULL;
510	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
511	list_del(&page->lru);
512	h->free_huge_pages--;
513	h->free_huge_pages_node[nid]--;
514	return page;
515}
516
517static struct page *dequeue_huge_page_vma(struct hstate *h,
518				struct vm_area_struct *vma,
519				unsigned long address, int avoid_reserve)
520{
521	struct page *page = NULL;
522	struct mempolicy *mpol;
523	nodemask_t *nodemask;
524	struct zonelist *zonelist;
525	struct zone *zone;
526	struct zoneref *z;
527
528	get_mems_allowed();
529	zonelist = huge_zonelist(vma, address,
530					htlb_alloc_mask, &mpol, &nodemask);
531	/*
532	 * A child process with MAP_PRIVATE mappings created by its parent
533	 * has no page reserves. This check ensures that reservations are
534	 * not "stolen". The child may still get SIGKILLed.
535	 */
536	if (!vma_has_reserves(vma) &&
537			h->free_huge_pages - h->resv_huge_pages == 0)
538		goto err;
539
540	/* If reserves cannot be used, ensure enough pages are in the pool */
541	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
542		goto err;
543
544	for_each_zone_zonelist_nodemask(zone, z, zonelist,
545						MAX_NR_ZONES - 1, nodemask) {
546		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
547			page = dequeue_huge_page_node(h, zone_to_nid(zone));
548			if (page) {
549				if (!avoid_reserve)
550					decrement_hugepage_resv_vma(h, vma);
551				break;
552			}
553		}
554	}
555err:
556	mpol_cond_put(mpol);
557	put_mems_allowed();
558	return page;
559}
560
561static void update_and_free_page(struct hstate *h, struct page *page)
562{
563	int i;
564
565	VM_BUG_ON(h->order >= MAX_ORDER);
566
567	h->nr_huge_pages--;
568	h->nr_huge_pages_node[page_to_nid(page)]--;
569	for (i = 0; i < pages_per_huge_page(h); i++) {
570		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
571				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
572				1 << PG_private | 1<< PG_writeback);
573	}
574	set_compound_page_dtor(page, NULL);
575	set_page_refcounted(page);
576	arch_release_hugepage(page);
577	__free_pages(page, huge_page_order(h));
578}
579
580struct hstate *size_to_hstate(unsigned long size)
581{
582	struct hstate *h;
583
584	for_each_hstate(h) {
585		if (huge_page_size(h) == size)
586			return h;
587	}
588	return NULL;
589}
590
591static void free_huge_page(struct page *page)
592{
593	/*
594	 * Can't pass hstate in here because it is called from the
595	 * compound page destructor.
596	 */
597	struct hstate *h = page_hstate(page);
598	int nid = page_to_nid(page);
599	struct address_space *mapping;
600
601	mapping = (struct address_space *) page_private(page);
602	set_page_private(page, 0);
603	page->mapping = NULL;
604	BUG_ON(page_count(page));
605	BUG_ON(page_mapcount(page));
606	INIT_LIST_HEAD(&page->lru);
607
608	spin_lock(&hugetlb_lock);
609	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
610		update_and_free_page(h, page);
611		h->surplus_huge_pages--;
612		h->surplus_huge_pages_node[nid]--;
613	} else {
614		enqueue_huge_page(h, page);
615	}
616	spin_unlock(&hugetlb_lock);
617	if (mapping)
618		hugetlb_put_quota(mapping, 1);
619}
620
621static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
622{
623	set_compound_page_dtor(page, free_huge_page);
624	spin_lock(&hugetlb_lock);
625	h->nr_huge_pages++;
626	h->nr_huge_pages_node[nid]++;
627	spin_unlock(&hugetlb_lock);
628	put_page(page); /* free it into the hugepage allocator */
629}
630
631static void prep_compound_gigantic_page(struct page *page, unsigned long order)
632{
633	int i;
634	int nr_pages = 1 << order;
635	struct page *p = page + 1;
636
637	/* we rely on prep_new_huge_page to set the destructor */
638	set_compound_order(page, order);
639	__SetPageHead(page);
640	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
641		__SetPageTail(p);
642		p->first_page = page;
643	}
644}
645
646int PageHuge(struct page *page)
647{
648	compound_page_dtor *dtor;
649
650	if (!PageCompound(page))
651		return 0;
652
653	page = compound_head(page);
654	dtor = get_compound_page_dtor(page);
655
656	return dtor == free_huge_page;
657}
658
659EXPORT_SYMBOL_GPL(PageHuge);
660
661static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
662{
663	struct page *page;
664
665	if (h->order >= MAX_ORDER)
666		return NULL;
667
668	page = alloc_pages_exact_node(nid,
669		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
670						__GFP_REPEAT|__GFP_NOWARN,
671		huge_page_order(h));
672	if (page) {
673		if (arch_prepare_hugepage(page)) {
674			__free_pages(page, huge_page_order(h));
675			return NULL;
676		}
677		prep_new_huge_page(h, page, nid);
678	}
679
680	return page;
681}
682
683/*
684 * common helper functions for hstate_next_node_to_{alloc|free}.
685 * We may have allocated or freed a huge page based on a different
686 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
687 * be outside of *nodes_allowed.  Ensure that we use an allowed
688 * node for alloc or free.
689 */
690static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
691{
692	nid = next_node(nid, *nodes_allowed);
693	if (nid == MAX_NUMNODES)
694		nid = first_node(*nodes_allowed);
695	VM_BUG_ON(nid >= MAX_NUMNODES);
696
697	return nid;
698}
699
700static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
701{
702	if (!node_isset(nid, *nodes_allowed))
703		nid = next_node_allowed(nid, nodes_allowed);
704	return nid;
705}
706
707/*
708 * returns the previously saved node ["this node"] from which to
709 * allocate a persistent huge page for the pool and advance the
710 * next node from which to allocate, handling wrap at end of node
711 * mask.
712 */
713static int hstate_next_node_to_alloc(struct hstate *h,
714					nodemask_t *nodes_allowed)
715{
716	int nid;
717
718	VM_BUG_ON(!nodes_allowed);
719
720	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
721	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
722
723	return nid;
724}
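/*
 * Illustration of the round-robin interleave: with nodes_allowed ==
 * {0,1,2} and h->next_nid_to_alloc == 1, three successive calls return
 * 1, 2, 0 and leave next_nid_to_alloc at 1 again, so persistent huge
 * pages are spread evenly across the allowed nodes.
 */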
725
726static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
727{
728	struct page *page;
729	int start_nid;
730	int next_nid;
731	int ret = 0;
732
733	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
734	next_nid = start_nid;
735
736	do {
737		page = alloc_fresh_huge_page_node(h, next_nid);
738		if (page) {
739			ret = 1;
740			break;
741		}
742		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
743	} while (next_nid != start_nid);
744
745	if (ret)
746		count_vm_event(HTLB_BUDDY_PGALLOC);
747	else
748		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
749
750	return ret;
751}
752
753/*
754 * helper for free_pool_huge_page() - return the previously saved
755 * node ["this node"] from which to free a huge page.  Advance the
756 * next node id whether or not we find a free huge page to free so
757 * that the next attempt to free addresses the next node.
758 */
759static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
760{
761	int nid;
762
763	VM_BUG_ON(!nodes_allowed);
764
765	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
766	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
767
768	return nid;
769}
770
771/*
772 * Free huge page from pool from next node to free.
773 * Attempt to keep persistent huge pages more or less
774 * balanced over allowed nodes.
775 * Called with hugetlb_lock locked.
776 */
777static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
778							 bool acct_surplus)
779{
780	int start_nid;
781	int next_nid;
782	int ret = 0;
783
784	start_nid = hstate_next_node_to_free(h, nodes_allowed);
785	next_nid = start_nid;
786
787	do {
788		/*
789		 * If we're returning unused surplus pages, only examine
790		 * nodes with surplus pages.
791		 */
792		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
793		    !list_empty(&h->hugepage_freelists[next_nid])) {
794			struct page *page =
795				list_entry(h->hugepage_freelists[next_nid].next,
796					  struct page, lru);
797			list_del(&page->lru);
798			h->free_huge_pages--;
799			h->free_huge_pages_node[next_nid]--;
800			if (acct_surplus) {
801				h->surplus_huge_pages--;
802				h->surplus_huge_pages_node[next_nid]--;
803			}
804			update_and_free_page(h, page);
805			ret = 1;
806			break;
807		}
808		next_nid = hstate_next_node_to_free(h, nodes_allowed);
809	} while (next_nid != start_nid);
810
811	return ret;
812}
813
814static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
815{
816	struct page *page;
817	unsigned int r_nid;
818
819	if (h->order >= MAX_ORDER)
820		return NULL;
821
822	/*
823	 * Assume we will successfully allocate the surplus page to
824	 * prevent racing processes from causing the surplus to exceed
825	 * overcommit
826	 *
827	 * This however introduces a different race, where a process B
828	 * tries to grow the static hugepage pool while alloc_pages() is
829	 * called by process A. B will only examine the per-node
830	 * counters in determining if surplus huge pages can be
831	 * converted to normal huge pages in adjust_pool_surplus(). A
832	 * won't be able to increment the per-node counter, until the
833	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
834	 * no more huge pages can be converted from surplus to normal
835	 * state (and doesn't try to convert again). Thus, we have a
836	 * case where a surplus huge page exists, the pool is grown, and
837	 * the surplus huge page still exists after, even though it
838	 * should just have been converted to a normal huge page. This
839	 * does not leak memory, though, as the hugepage will be freed
840	 * once it is out of use. It also does not allow the counters to
841	 * go out of whack in adjust_pool_surplus() as we don't modify
842	 * the node values until we've gotten the hugepage and only the
843	 * per-node value is checked there.
844	 */
845	spin_lock(&hugetlb_lock);
846	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
847		spin_unlock(&hugetlb_lock);
848		return NULL;
849	} else {
850		h->nr_huge_pages++;
851		h->surplus_huge_pages++;
852	}
853	spin_unlock(&hugetlb_lock);
854
855	if (nid == NUMA_NO_NODE)
856		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
857				   __GFP_REPEAT|__GFP_NOWARN,
858				   huge_page_order(h));
859	else
860		page = alloc_pages_exact_node(nid,
861			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
862			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
863
864	if (page && arch_prepare_hugepage(page)) {
865		__free_pages(page, huge_page_order(h));
866		return NULL;
867	}
868
869	spin_lock(&hugetlb_lock);
870	if (page) {
871		/*
872		 * This page is now managed by the hugetlb allocator and has
873		 * no users -- drop the buddy allocator's reference.
874		 */
875		put_page_testzero(page);
876		VM_BUG_ON(page_count(page));
877		r_nid = page_to_nid(page);
878		set_compound_page_dtor(page, free_huge_page);
879		/*
880		 * We incremented the global counters already
881		 */
882		h->nr_huge_pages_node[r_nid]++;
883		h->surplus_huge_pages_node[r_nid]++;
884		__count_vm_event(HTLB_BUDDY_PGALLOC);
885	} else {
886		h->nr_huge_pages--;
887		h->surplus_huge_pages--;
888		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
889	}
890	spin_unlock(&hugetlb_lock);
891
892	return page;
893}
894
895/*
896 * This allocation function is useful in the context where vma is irrelevant.
897 * E.g. soft-offlining uses this function because it only cares about the
898 * physical address of the error page.
899 */
900struct page *alloc_huge_page_node(struct hstate *h, int nid)
901{
902	struct page *page;
903
904	spin_lock(&hugetlb_lock);
905	page = dequeue_huge_page_node(h, nid);
906	spin_unlock(&hugetlb_lock);
907
908	if (!page)
909		page = alloc_buddy_huge_page(h, nid);
910
911	return page;
912}
913
914/*
915 * Increase the hugetlb pool such that it can accommodate a reservation
916 * of size 'delta'.
917 */
918static int gather_surplus_pages(struct hstate *h, int delta)
919{
920	struct list_head surplus_list;
921	struct page *page, *tmp;
922	int ret, i;
923	int needed, allocated;
924
925	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
926	if (needed <= 0) {
927		h->resv_huge_pages += delta;
928		return 0;
929	}
930
931	allocated = 0;
932	INIT_LIST_HEAD(&surplus_list);
933
934	ret = -ENOMEM;
935retry:
936	spin_unlock(&hugetlb_lock);
937	for (i = 0; i < needed; i++) {
938		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
939		if (!page) {
940			/*
941			 * We were not able to allocate enough pages to
942			 * satisfy the entire reservation so we free what
943			 * we've allocated so far.
944			 */
945			spin_lock(&hugetlb_lock);
946			needed = 0;
947			goto free;
948		}
949
950		list_add(&page->lru, &surplus_list);
951	}
952	allocated += needed;
953
954	/*
955	 * After retaking hugetlb_lock, we need to recalculate 'needed'
956	 * because either resv_huge_pages or free_huge_pages may have changed.
957	 */
958	spin_lock(&hugetlb_lock);
959	needed = (h->resv_huge_pages + delta) -
960			(h->free_huge_pages + allocated);
961	if (needed > 0)
962		goto retry;
963
964	/*
965	 * The surplus_list now contains _at_least_ the number of extra pages
966 * needed to accommodate the reservation.  Add the appropriate number
967	 * of pages to the hugetlb pool and free the extras back to the buddy
968	 * allocator.  Commit the entire reservation here to prevent another
969	 * process from stealing the pages as they are added to the pool but
970	 * before they are reserved.
971	 */
972	needed += allocated;
973	h->resv_huge_pages += delta;
974	ret = 0;
975free:
976	/* Free the needed pages to the hugetlb pool */
977	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
978		if ((--needed) < 0)
979			break;
980		list_del(&page->lru);
981		enqueue_huge_page(h, page);
982	}
983
984	/* Free unnecessary surplus pages to the buddy allocator */
985	if (!list_empty(&surplus_list)) {
986		spin_unlock(&hugetlb_lock);
987		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
988			list_del(&page->lru);
989			/*
990			 * The page has a reference count of zero already, so
991			 * call free_huge_page directly instead of using
992			 * put_page.  This must be done with hugetlb_lock
993			 * unlocked which is safe because free_huge_page takes
994			 * hugetlb_lock before deciding how to free the page.
995			 */
996			free_huge_page(page);
997		}
998		spin_lock(&hugetlb_lock);
999	}
1000
1001	return ret;
1002}
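/*
 * Worked example (illustrative numbers): with h->free_huge_pages == 10,
 * h->resv_huge_pages == 8 and delta == 5, only 2 free pages are still
 * unreserved, so needed == (8 + 5) - 10 == 3 surplus pages are allocated
 * from the buddy allocator before the reservation of 5 is committed.
 */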
1003
1004/*
1005 * When releasing a hugetlb pool reservation, any surplus pages that were
1006 * allocated to satisfy the reservation must be explicitly freed if they were
1007 * never used.
1008 * Called with hugetlb_lock held.
1009 */
1010static void return_unused_surplus_pages(struct hstate *h,
1011					unsigned long unused_resv_pages)
1012{
1013	unsigned long nr_pages;
1014
1015	/* Uncommit the reservation */
1016	h->resv_huge_pages -= unused_resv_pages;
1017
1018	/* Cannot return gigantic pages currently */
1019	if (h->order >= MAX_ORDER)
1020		return;
1021
1022	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1023
1024	/*
1025	 * We want to release as many surplus pages as possible, spread
1026	 * evenly across all nodes with memory. Iterate across these nodes
1027	 * until we can no longer free unreserved surplus pages. This occurs
1028	 * when the nodes with surplus pages have no free pages.
1029	 * free_pool_huge_page() will balance the freed pages across the
1030	 * on-line nodes with memory and will handle the hstate accounting.
1031	 */
1032	while (nr_pages--) {
1033		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
1034			break;
1035	}
1036}
1037
1038/*
1039 * Determine if the huge page at addr within the vma has an associated
1040 * reservation.  Where it does not we will need to logically increase
1041 * reservation and actually increase quota before an allocation can occur.
1042 * Where any new reservation would be required, the reservation change is
1043 * prepared, but not committed.  Once the page has been quota'd, allocated
1044 * and instantiated, the change should be committed via vma_commit_reservation.
1045 * No action is required on failure.
1046 */
1047static long vma_needs_reservation(struct hstate *h,
1048			struct vm_area_struct *vma, unsigned long addr)
1049{
1050	struct address_space *mapping = vma->vm_file->f_mapping;
1051	struct inode *inode = mapping->host;
1052
1053	if (vma->vm_flags & VM_MAYSHARE) {
1054		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1055		return region_chg(&inode->i_mapping->private_list,
1056							idx, idx + 1);
1057
1058	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1059		return 1;
1060
1061	} else  {
1062		long err;
1063		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1064		struct resv_map *reservations = vma_resv_map(vma);
1065
1066		err = region_chg(&reservations->regions, idx, idx + 1);
1067		if (err < 0)
1068			return err;
1069		return 0;
1070	}
1071}
1072static void vma_commit_reservation(struct hstate *h,
1073			struct vm_area_struct *vma, unsigned long addr)
1074{
1075	struct address_space *mapping = vma->vm_file->f_mapping;
1076	struct inode *inode = mapping->host;
1077
1078	if (vma->vm_flags & VM_MAYSHARE) {
1079		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1080		region_add(&inode->i_mapping->private_list, idx, idx + 1);
1081
1082	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1083		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1084		struct resv_map *reservations = vma_resv_map(vma);
1085
1086		/* Mark this page used in the map. */
1087		region_add(&reservations->regions, idx, idx + 1);
1088	}
1089}
1090
1091static struct page *alloc_huge_page(struct vm_area_struct *vma,
1092				    unsigned long addr, int avoid_reserve)
1093{
1094	struct hstate *h = hstate_vma(vma);
1095	struct page *page;
1096	struct address_space *mapping = vma->vm_file->f_mapping;
1097	struct inode *inode = mapping->host;
1098	long chg;
1099
1100	/*
1101	 * Processes that did not create the mapping will have no reserves and
1102	 * will not have been accounted against quota. Check that the quota
1103	 * charge can be made before satisfying the allocation.
1104	 * MAP_NORESERVE mappings may also need pages and quota allocated
1105	 * if no reserve mapping overlaps.
1106	 */
1107	chg = vma_needs_reservation(h, vma, addr);
1108	if (chg < 0)
1109		return ERR_PTR(chg);
1110	if (chg)
1111		if (hugetlb_get_quota(inode->i_mapping, chg))
1112			return ERR_PTR(-ENOSPC);
1113
1114	spin_lock(&hugetlb_lock);
1115	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1116	spin_unlock(&hugetlb_lock);
1117
1118	if (!page) {
1119		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1120		if (!page) {
1121			hugetlb_put_quota(inode->i_mapping, chg);
1122			return ERR_PTR(-VM_FAULT_SIGBUS);
1123		}
1124	}
1125
1126	set_page_refcounted(page);
1127	set_page_private(page, (unsigned long) mapping);
1128
1129	vma_commit_reservation(h, vma, addr);
1130
1131	return page;
1132}
1133
1134int __weak alloc_bootmem_huge_page(struct hstate *h)
1135{
1136	struct huge_bootmem_page *m;
1137	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1138
1139	while (nr_nodes) {
1140		void *addr;
1141
1142		addr = __alloc_bootmem_node_nopanic(
1143				NODE_DATA(hstate_next_node_to_alloc(h,
1144						&node_states[N_HIGH_MEMORY])),
1145				huge_page_size(h), huge_page_size(h), 0);
1146
1147		if (addr) {
1148			/*
1149			 * Use the beginning of the huge page to store the
1150			 * huge_bootmem_page struct (until gather_bootmem
1151			 * puts them into the mem_map).
1152			 */
1153			m = addr;
1154			goto found;
1155		}
1156		nr_nodes--;
1157	}
1158	return 0;
1159
1160found:
1161	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1162	/* Put them into a private list first because mem_map is not up yet */
1163	list_add(&m->list, &huge_boot_pages);
1164	m->hstate = h;
1165	return 1;
1166}
1167
1168static void prep_compound_huge_page(struct page *page, int order)
1169{
1170	if (unlikely(order > (MAX_ORDER - 1)))
1171		prep_compound_gigantic_page(page, order);
1172	else
1173		prep_compound_page(page, order);
1174}
1175
1176/* Put bootmem huge pages into the standard lists after mem_map is up */
1177static void __init gather_bootmem_prealloc(void)
1178{
1179	struct huge_bootmem_page *m;
1180
1181	list_for_each_entry(m, &huge_boot_pages, list) {
1182		struct page *page = virt_to_page(m);
1183		struct hstate *h = m->hstate;
1184		__ClearPageReserved(page);
1185		WARN_ON(page_count(page) != 1);
1186		prep_compound_huge_page(page, h->order);
1187		prep_new_huge_page(h, page, page_to_nid(page));
1188	}
1189}
1190
1191static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1192{
1193	unsigned long i;
1194
1195	for (i = 0; i < h->max_huge_pages; ++i) {
1196		if (h->order >= MAX_ORDER) {
1197			if (!alloc_bootmem_huge_page(h))
1198				break;
1199		} else if (!alloc_fresh_huge_page(h,
1200					 &node_states[N_HIGH_MEMORY]))
1201			break;
1202	}
1203	h->max_huge_pages = i;
1204}
1205
1206static void __init hugetlb_init_hstates(void)
1207{
1208	struct hstate *h;
1209
1210	for_each_hstate(h) {
1211		/* oversize hugepages were init'ed in early boot */
1212		if (h->order < MAX_ORDER)
1213			hugetlb_hstate_alloc_pages(h);
1214	}
1215}
1216
1217static char * __init memfmt(char *buf, unsigned long n)
1218{
1219	if (n >= (1UL << 30))
1220		sprintf(buf, "%lu GB", n >> 30);
1221	else if (n >= (1UL << 20))
1222		sprintf(buf, "%lu MB", n >> 20);
1223	else
1224		sprintf(buf, "%lu KB", n >> 10);
1225	return buf;
1226}
1227
1228static void __init report_hugepages(void)
1229{
1230	struct hstate *h;
1231
1232	for_each_hstate(h) {
1233		char buf[32];
1234		printk(KERN_INFO "HugeTLB registered %s page size, "
1235				 "pre-allocated %ld pages\n",
1236			memfmt(buf, huge_page_size(h)),
1237			h->free_huge_pages);
1238	}
1239}
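/*
 * For example, booting with hugepagesz=2M hugepages=512 would print
 * something like (illustrative):
 *
 *	HugeTLB registered 2 MB page size, pre-allocated 512 pages
 */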
1240
1241#ifdef CONFIG_HIGHMEM
1242static void try_to_free_low(struct hstate *h, unsigned long count,
1243						nodemask_t *nodes_allowed)
1244{
1245	int i;
1246
1247	if (h->order >= MAX_ORDER)
1248		return;
1249
1250	for_each_node_mask(i, *nodes_allowed) {
1251		struct page *page, *next;
1252		struct list_head *freel = &h->hugepage_freelists[i];
1253		list_for_each_entry_safe(page, next, freel, lru) {
1254			if (count >= h->nr_huge_pages)
1255				return;
1256			if (PageHighMem(page))
1257				continue;
1258			list_del(&page->lru);
1259			update_and_free_page(h, page);
1260			h->free_huge_pages--;
1261			h->free_huge_pages_node[page_to_nid(page)]--;
1262		}
1263	}
1264}
1265#else
1266static inline void try_to_free_low(struct hstate *h, unsigned long count,
1267						nodemask_t *nodes_allowed)
1268{
1269}
1270#endif
1271
1272/*
1273 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1274 * balanced by operating on them in a round-robin fashion.
1275 * Returns 1 if an adjustment was made.
1276 */
1277static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1278				int delta)
1279{
1280	int start_nid, next_nid;
1281	int ret = 0;
1282
1283	VM_BUG_ON(delta != -1 && delta != 1);
1284
1285	if (delta < 0)
1286		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1287	else
1288		start_nid = hstate_next_node_to_free(h, nodes_allowed);
1289	next_nid = start_nid;
1290
1291	do {
1292		int nid = next_nid;
1293		if (delta < 0)  {
1294			/*
1295			 * To shrink on this node, there must be a surplus page
1296			 */
1297			if (!h->surplus_huge_pages_node[nid]) {
1298				next_nid = hstate_next_node_to_alloc(h,
1299								nodes_allowed);
1300				continue;
1301			}
1302		}
1303		if (delta > 0) {
1304			/*
1305			 * Surplus cannot exceed the total number of pages
1306			 */
1307			if (h->surplus_huge_pages_node[nid] >=
1308						h->nr_huge_pages_node[nid]) {
1309				next_nid = hstate_next_node_to_free(h,
1310								nodes_allowed);
1311				continue;
1312			}
1313		}
1314
1315		h->surplus_huge_pages += delta;
1316		h->surplus_huge_pages_node[nid] += delta;
1317		ret = 1;
1318		break;
1319	} while (next_nid != start_nid);
1320
1321	return ret;
1322}
1323
1324#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1325static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1326						nodemask_t *nodes_allowed)
1327{
1328	unsigned long min_count, ret;
1329
1330	if (h->order >= MAX_ORDER)
1331		return h->max_huge_pages;
1332
1333	/*
1334	 * Increase the pool size
1335	 * First take pages out of surplus state.  Then make up the
1336	 * remaining difference by allocating fresh huge pages.
1337	 *
1338	 * We might race with alloc_buddy_huge_page() here and be unable
1339	 * to convert a surplus huge page to a normal huge page. That is
1340	 * not critical, though, it just means the overall size of the
1341	 * pool might be one hugepage larger than it needs to be, but
1342	 * within all the constraints specified by the sysctls.
1343	 */
1344	spin_lock(&hugetlb_lock);
1345	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1346		if (!adjust_pool_surplus(h, nodes_allowed, -1))
1347			break;
1348	}
1349
1350	while (count > persistent_huge_pages(h)) {
1351		/*
1352		 * If this allocation races such that we no longer need the
1353		 * page, free_huge_page will handle it by freeing the page
1354		 * and reducing the surplus.
1355		 */
1356		spin_unlock(&hugetlb_lock);
1357		ret = alloc_fresh_huge_page(h, nodes_allowed);
1358		spin_lock(&hugetlb_lock);
1359		if (!ret)
1360			goto out;
1361
1362		/* Bail for signals. Probably ctrl-c from user */
1363		if (signal_pending(current))
1364			goto out;
1365	}
1366
1367	/*
1368	 * Decrease the pool size
1369	 * First return free pages to the buddy allocator (being careful
1370	 * to keep enough around to satisfy reservations).  Then place
1371	 * pages into surplus state as needed so the pool will shrink
1372	 * to the desired size as pages become free.
1373	 *
1374	 * By placing pages into the surplus state independent of the
1375	 * overcommit value, we are allowing the surplus pool size to
1376	 * exceed overcommit. There are few sane options here. Since
1377	 * alloc_buddy_huge_page() is checking the global counter,
1378	 * though, we'll note that we're not allowed to exceed surplus
1379	 * and won't grow the pool anywhere else. Not until one of the
1380	 * sysctls are changed, or the surplus pages go out of use.
1381	 */
1382	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1383	min_count = max(count, min_count);
1384	try_to_free_low(h, min_count, nodes_allowed);
1385	while (min_count < persistent_huge_pages(h)) {
1386		if (!free_pool_huge_page(h, nodes_allowed, 0))
1387			break;
1388	}
1389	while (count < persistent_huge_pages(h)) {
1390		if (!adjust_pool_surplus(h, nodes_allowed, 1))
1391			break;
1392	}
1393out:
1394	ret = persistent_huge_pages(h);
1395	spin_unlock(&hugetlb_lock);
1396	return ret;
1397}
1398
1399#define HSTATE_ATTR_RO(_name) \
1400	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1401
1402#define HSTATE_ATTR(_name) \
1403	static struct kobj_attribute _name##_attr = \
1404		__ATTR(_name, 0644, _name##_show, _name##_store)
1405
1406static struct kobject *hugepages_kobj;
1407static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1408
1409static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1410
1411static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1412{
1413	int i;
1414
1415	for (i = 0; i < HUGE_MAX_HSTATE; i++)
1416		if (hstate_kobjs[i] == kobj) {
1417			if (nidp)
1418				*nidp = NUMA_NO_NODE;
1419			return &hstates[i];
1420		}
1421
1422	return kobj_to_node_hstate(kobj, nidp);
1423}
1424
1425static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1426					struct kobj_attribute *attr, char *buf)
1427{
1428	struct hstate *h;
1429	unsigned long nr_huge_pages;
1430	int nid;
1431
1432	h = kobj_to_hstate(kobj, &nid);
1433	if (nid == NUMA_NO_NODE)
1434		nr_huge_pages = h->nr_huge_pages;
1435	else
1436		nr_huge_pages = h->nr_huge_pages_node[nid];
1437
1438	return sprintf(buf, "%lu\n", nr_huge_pages);
1439}
1440static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1441			struct kobject *kobj, struct kobj_attribute *attr,
1442			const char *buf, size_t len)
1443{
1444	int err;
1445	int nid;
1446	unsigned long count;
1447	struct hstate *h;
1448	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1449
1450	err = strict_strtoul(buf, 10, &count);
1451	if (err)
1452		return 0;
1453
1454	h = kobj_to_hstate(kobj, &nid);
1455	if (nid == NUMA_NO_NODE) {
1456		/*
1457		 * global hstate attribute
1458		 */
1459		if (!(obey_mempolicy &&
1460				init_nodemask_of_mempolicy(nodes_allowed))) {
1461			NODEMASK_FREE(nodes_allowed);
1462			nodes_allowed = &node_states[N_HIGH_MEMORY];
1463		}
1464	} else if (nodes_allowed) {
1465		/*
1466		 * per node hstate attribute: adjust count to global,
1467		 * but restrict alloc/free to the specified node.
1468		 */
1469		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1470		init_nodemask_of_node(nodes_allowed, nid);
1471	} else
1472		nodes_allowed = &node_states[N_HIGH_MEMORY];
1473
1474	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1475
1476	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1477		NODEMASK_FREE(nodes_allowed);
1478
1479	return len;
1480}
1481
1482static ssize_t nr_hugepages_show(struct kobject *kobj,
1483				       struct kobj_attribute *attr, char *buf)
1484{
1485	return nr_hugepages_show_common(kobj, attr, buf);
1486}
1487
1488static ssize_t nr_hugepages_store(struct kobject *kobj,
1489	       struct kobj_attribute *attr, const char *buf, size_t len)
1490{
1491	return nr_hugepages_store_common(false, kobj, attr, buf, len);
1492}
1493HSTATE_ATTR(nr_hugepages);
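/*
 * The attribute above is exposed per hstate under /sys/kernel/mm, e.g.
 * (illustrative paths for 2MB and 1GB hstates):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 *
 * Writing a count resizes the pool via set_max_huge_pages(); reading it
 * reports h->nr_huge_pages.
 */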
1494
1495#ifdef CONFIG_NUMA
1496
1497/*
1498 * hstate attribute for optionally mempolicy-based constraint on persistent
1499 * huge page alloc/free.
1500 */
1501static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1502				       struct kobj_attribute *attr, char *buf)
1503{
1504	return nr_hugepages_show_common(kobj, attr, buf);
1505}
1506
1507static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1508	       struct kobj_attribute *attr, const char *buf, size_t len)
1509{
1510	return nr_hugepages_store_common(true, kobj, attr, buf, len);
1511}
1512HSTATE_ATTR(nr_hugepages_mempolicy);
1513#endif
1514
1515
1516static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1517					struct kobj_attribute *attr, char *buf)
1518{
1519	struct hstate *h = kobj_to_hstate(kobj, NULL);
1520	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1521}
1522static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1523		struct kobj_attribute *attr, const char *buf, size_t count)
1524{
1525	int err;
1526	unsigned long input;
1527	struct hstate *h = kobj_to_hstate(kobj, NULL);
1528
1529	err = strict_strtoul(buf, 10, &input);
1530	if (err)
1531		return 0;
1532
1533	spin_lock(&hugetlb_lock);
1534	h->nr_overcommit_huge_pages = input;
1535	spin_unlock(&hugetlb_lock);
1536
1537	return count;
1538}
1539HSTATE_ATTR(nr_overcommit_hugepages);
1540
1541static ssize_t free_hugepages_show(struct kobject *kobj,
1542					struct kobj_attribute *attr, char *buf)
1543{
1544	struct hstate *h;
1545	unsigned long free_huge_pages;
1546	int nid;
1547
1548	h = kobj_to_hstate(kobj, &nid);
1549	if (nid == NUMA_NO_NODE)
1550		free_huge_pages = h->free_huge_pages;
1551	else
1552		free_huge_pages = h->free_huge_pages_node[nid];
1553
1554	return sprintf(buf, "%lu\n", free_huge_pages);
1555}
1556HSTATE_ATTR_RO(free_hugepages);
1557
1558static ssize_t resv_hugepages_show(struct kobject *kobj,
1559					struct kobj_attribute *attr, char *buf)
1560{
1561	struct hstate *h = kobj_to_hstate(kobj, NULL);
1562	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1563}
1564HSTATE_ATTR_RO(resv_hugepages);
1565
1566static ssize_t surplus_hugepages_show(struct kobject *kobj,
1567					struct kobj_attribute *attr, char *buf)
1568{
1569	struct hstate *h;
1570	unsigned long surplus_huge_pages;
1571	int nid;
1572
1573	h = kobj_to_hstate(kobj, &nid);
1574	if (nid == NUMA_NO_NODE)
1575		surplus_huge_pages = h->surplus_huge_pages;
1576	else
1577		surplus_huge_pages = h->surplus_huge_pages_node[nid];
1578
1579	return sprintf(buf, "%lu\n", surplus_huge_pages);
1580}
1581HSTATE_ATTR_RO(surplus_hugepages);
1582
1583static struct attribute *hstate_attrs[] = {
1584	&nr_hugepages_attr.attr,
1585	&nr_overcommit_hugepages_attr.attr,
1586	&free_hugepages_attr.attr,
1587	&resv_hugepages_attr.attr,
1588	&surplus_hugepages_attr.attr,
1589#ifdef CONFIG_NUMA
1590	&nr_hugepages_mempolicy_attr.attr,
1591#endif
1592	NULL,
1593};
1594
1595static struct attribute_group hstate_attr_group = {
1596	.attrs = hstate_attrs,
1597};
1598
1599static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1600				    struct kobject **hstate_kobjs,
1601				    struct attribute_group *hstate_attr_group)
1602{
1603	int retval;
1604	int hi = h - hstates;
1605
1606	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1607	if (!hstate_kobjs[hi])
1608		return -ENOMEM;
1609
1610	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1611	if (retval)
1612		kobject_put(hstate_kobjs[hi]);
1613
1614	return retval;
1615}
1616
1617static void __init hugetlb_sysfs_init(void)
1618{
1619	struct hstate *h;
1620	int err;
1621
1622	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1623	if (!hugepages_kobj)
1624		return;
1625
1626	for_each_hstate(h) {
1627		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1628					 hstate_kobjs, &hstate_attr_group);
1629		if (err)
1630			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1631								h->name);
1632	}
1633}
1634
1635#ifdef CONFIG_NUMA
1636
1637/*
1638 * node_hstate/s - associate per node hstate attributes, via their kobjects,
1639 * with node sysdevs in node_devices[] using a parallel array.  The array
1640 * index of a node sysdev or _hstate == node id.
1641 * This is here to avoid any static dependency of the node sysdev driver, in
1642 * the base kernel, on the hugetlb module.
1643 */
1644struct node_hstate {
1645	struct kobject		*hugepages_kobj;
1646	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
1647};
1648struct node_hstate node_hstates[MAX_NUMNODES];
1649
1650/*
1651 * A subset of global hstate attributes for node sysdevs
1652 */
1653static struct attribute *per_node_hstate_attrs[] = {
1654	&nr_hugepages_attr.attr,
1655	&free_hugepages_attr.attr,
1656	&surplus_hugepages_attr.attr,
1657	NULL,
1658};
1659
1660static struct attribute_group per_node_hstate_attr_group = {
1661	.attrs = per_node_hstate_attrs,
1662};
1663
1664/*
1665 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1666 * Returns node id via non-NULL nidp.
1667 */
1668static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1669{
1670	int nid;
1671
1672	for (nid = 0; nid < nr_node_ids; nid++) {
1673		struct node_hstate *nhs = &node_hstates[nid];
1674		int i;
1675		for (i = 0; i < HUGE_MAX_HSTATE; i++)
1676			if (nhs->hstate_kobjs[i] == kobj) {
1677				if (nidp)
1678					*nidp = nid;
1679				return &hstates[i];
1680			}
1681	}
1682
1683	BUG();
1684	return NULL;
1685}
1686
1687/*
1688 * Unregister hstate attributes from a single node sysdev.
1689 * No-op if no hstate attributes attached.
1690 */
1691void hugetlb_unregister_node(struct node *node)
1692{
1693	struct hstate *h;
1694	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1695
1696	if (!nhs->hugepages_kobj)
1697		return;		/* no hstate attributes */
1698
1699	for_each_hstate(h)
1700		if (nhs->hstate_kobjs[h - hstates]) {
1701			kobject_put(nhs->hstate_kobjs[h - hstates]);
1702			nhs->hstate_kobjs[h - hstates] = NULL;
1703		}
1704
1705	kobject_put(nhs->hugepages_kobj);
1706	nhs->hugepages_kobj = NULL;
1707}
1708
1709/*
1710 * hugetlb module exit:  unregister hstate attributes from node sysdevs
1711 * that have them.
1712 */
1713static void hugetlb_unregister_all_nodes(void)
1714{
1715	int nid;
1716
1717	/*
1718	 * disable node sysdev registrations.
1719	 */
1720	register_hugetlbfs_with_node(NULL, NULL);
1721
1722	/*
1723	 * remove hstate attributes from any nodes that have them.
1724	 */
1725	for (nid = 0; nid < nr_node_ids; nid++)
1726		hugetlb_unregister_node(&node_devices[nid]);
1727}
1728
1729/*
1730 * Register hstate attributes for a single node sysdev.
1731 * No-op if attributes already registered.
1732 */
1733void hugetlb_register_node(struct node *node)
1734{
1735	struct hstate *h;
1736	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1737	int err;
1738
1739	if (nhs->hugepages_kobj)
1740		return;		/* already allocated */
1741
1742	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1743							&node->sysdev.kobj);
1744	if (!nhs->hugepages_kobj)
1745		return;
1746
1747	for_each_hstate(h) {
1748		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1749						nhs->hstate_kobjs,
1750						&per_node_hstate_attr_group);
1751		if (err) {
1752			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1753					" for node %d\n",
1754						h->name, node->sysdev.id);
1755			hugetlb_unregister_node(node);
1756			break;
1757		}
1758	}
1759}
1760
1761/*
1762 * hugetlb init time:  register hstate attributes for all registered node
1763 * sysdevs of nodes that have memory.  All on-line nodes should have
1764 * registered their associated sysdev by this time.
1765 */
1766static void hugetlb_register_all_nodes(void)
1767{
1768	int nid;
1769
1770	for_each_node_state(nid, N_HIGH_MEMORY) {
1771		struct node *node = &node_devices[nid];
1772		if (node->sysdev.id == nid)
1773			hugetlb_register_node(node);
1774	}
1775
1776	/*
1777	 * Let the node sysdev driver know we're here so it can
1778	 * [un]register hstate attributes on node hotplug.
1779	 */
1780	register_hugetlbfs_with_node(hugetlb_register_node,
1781				     hugetlb_unregister_node);
1782}
1783#else	/* !CONFIG_NUMA */
1784
1785static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1786{
1787	BUG();
1788	if (nidp)
1789		*nidp = -1;
1790	return NULL;
1791}
1792
1793static void hugetlb_unregister_all_nodes(void) { }
1794
1795static void hugetlb_register_all_nodes(void) { }
1796
1797#endif
1798
1799static void __exit hugetlb_exit(void)
1800{
1801	struct hstate *h;
1802
1803	hugetlb_unregister_all_nodes();
1804
1805	for_each_hstate(h) {
1806		kobject_put(hstate_kobjs[h - hstates]);
1807	}
1808
1809	kobject_put(hugepages_kobj);
1810}
1811module_exit(hugetlb_exit);
1812
1813static int __init hugetlb_init(void)
1814{
1815	/* Some platforms decide whether they support huge pages at boot
1816	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1817	 * there is no such support.
1818	 */
1819	if (HPAGE_SHIFT == 0)
1820		return 0;
1821
1822	if (!size_to_hstate(default_hstate_size)) {
1823		default_hstate_size = HPAGE_SIZE;
1824		if (!size_to_hstate(default_hstate_size))
1825			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1826	}
1827	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1828	if (default_hstate_max_huge_pages)
1829		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1830
1831	hugetlb_init_hstates();
1832
1833	gather_bootmem_prealloc();
1834
1835	report_hugepages();
1836
1837	hugetlb_sysfs_init();
1838
1839	hugetlb_register_all_nodes();
1840
1841	return 0;
1842}
1843module_init(hugetlb_init);
1844
1845/* Should be called on processing a hugepagesz=... option */
1846void __init hugetlb_add_hstate(unsigned order)
1847{
1848	struct hstate *h;
1849	unsigned long i;
1850
1851	if (size_to_hstate(PAGE_SIZE << order)) {
1852		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1853		return;
1854	}
1855	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1856	BUG_ON(order == 0);
1857	h = &hstates[max_hstate++];
1858	h->order = order;
1859	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1860	h->nr_huge_pages = 0;
1861	h->free_huge_pages = 0;
1862	for (i = 0; i < MAX_NUMNODES; ++i)
1863		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1864	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1865	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1866	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1867					huge_page_size(h)/1024);
1868
1869	parsed_hstate = h;
1870}
1871
1872static int __init hugetlb_nrpages_setup(char *s)
1873{
1874	unsigned long *mhp;
1875	static unsigned long *last_mhp;
1876
1877	/*
1878	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1879	 * so this hugepages= parameter goes to the "default hstate".
1880	 */
1881	if (!max_hstate)
1882		mhp = &default_hstate_max_huge_pages;
1883	else
1884		mhp = &parsed_hstate->max_huge_pages;
1885
1886	if (mhp == last_mhp) {
1887		printk(KERN_WARNING "hugepages= specified twice without "
1888			"interleaving hugepagesz=, ignoring\n");
1889		return 1;
1890	}
1891
1892	if (sscanf(s, "%lu", mhp) <= 0)
1893		*mhp = 0;
1894
1895	/*
1896	 * Global state is always initialized later in hugetlb_init.
1897	 * But we need to allocate >= MAX_ORDER hstates here early to still
1898	 * use the bootmem allocator.
1899	 */
1900	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1901		hugetlb_hstate_alloc_pages(parsed_hstate);
1902
1903	last_mhp = mhp;
1904
1905	return 1;
1906}
1907__setup("hugepages=", hugetlb_nrpages_setup);
1908
1909static int __init hugetlb_default_setup(char *s)
1910{
1911	default_hstate_size = memparse(s, &s);
1912	return 1;
1913}
1914__setup("default_hugepagesz=", hugetlb_default_setup);
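/*
 * The early parameters above combine on the kernel command line roughly
 * as (illustrative):
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * Each hugepagesz= (parsed by the architecture, which calls
 * hugetlb_add_hstate()) selects the hstate that the following hugepages=
 * count applies to; a hugepages= seen before any hugepagesz= sizes the
 * default hstate.
 */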
1915
1916static unsigned int cpuset_mems_nr(unsigned int *array)
1917{
1918	int node;
1919	unsigned int nr = 0;
1920
1921	for_each_node_mask(node, cpuset_current_mems_allowed)
1922		nr += array[node];
1923
1924	return nr;
1925}
1926
1927#ifdef CONFIG_SYSCTL
1928static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1929			 struct ctl_table *table, int write,
1930			 void __user *buffer, size_t *length, loff_t *ppos)
1931{
1932	struct hstate *h = &default_hstate;
1933	unsigned long tmp;
1934
1935	if (!write)
1936		tmp = h->max_huge_pages;
1937
1938	table->data = &tmp;
1939	table->maxlen = sizeof(unsigned long);
1940	proc_doulongvec_minmax(table, write, buffer, length, ppos);
1941
1942	if (write) {
1943		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1944						GFP_KERNEL | __GFP_NORETRY);
1945		if (!(obey_mempolicy &&
1946			       init_nodemask_of_mempolicy(nodes_allowed))) {
1947			NODEMASK_FREE(nodes_allowed);
1948			nodes_allowed = &node_states[N_HIGH_MEMORY];
1949		}
1950		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1951
1952		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1953			NODEMASK_FREE(nodes_allowed);
1954	}
1955
1956	return 0;
1957}
1958
1959int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1960			  void __user *buffer, size_t *length, loff_t *ppos)
1961{
1962
1963	return hugetlb_sysctl_handler_common(false, table, write,
1964							buffer, length, ppos);
1965}
1966
1967#ifdef CONFIG_NUMA
1968int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1969			  void __user *buffer, size_t *length, loff_t *ppos)
1970{
1971	return hugetlb_sysctl_handler_common(true, table, write,
1972							buffer, length, ppos);
1973}
1974#endif /* CONFIG_NUMA */
1975
1976int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1977			void __user *buffer,
1978			size_t *length, loff_t *ppos)
1979{
1980	proc_dointvec(table, write, buffer, length, ppos);
1981	if (hugepages_treat_as_movable)
1982		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1983	else
1984		htlb_alloc_mask = GFP_HIGHUSER;
1985	return 0;
1986}
1987
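/*
 * Sysctl handler for the huge page overcommit limit: on write, update the
 * default hstate's nr_overcommit_huge_pages under hugetlb_lock.
 */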
1988int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1989			void __user *buffer,
1990			size_t *length, loff_t *ppos)
1991{
1992	struct hstate *h = &default_hstate;
1993	unsigned long tmp;
1994
1995	if (!write)
1996		tmp = h->nr_overcommit_huge_pages;
1997
1998	table->data = &tmp;
1999	table->maxlen = sizeof(unsigned long);
2000	proc_doulongvec_minmax(table, write, buffer, length, ppos);
2001
2002	if (write) {
2003		spin_lock(&hugetlb_lock);
2004		h->nr_overcommit_huge_pages = tmp;
2005		spin_unlock(&hugetlb_lock);
2006	}
2007
2008	return 0;
2009}
2010
2011#endif /* CONFIG_SYSCTL */
2012
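/* Report the default hstate's global counters via @m (/proc/meminfo). */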
2013void hugetlb_report_meminfo(struct seq_file *m)
2014{
2015	struct hstate *h = &default_hstate;
2016	seq_printf(m,
2017			"HugePages_Total:   %5lu\n"
2018			"HugePages_Free:    %5lu\n"
2019			"HugePages_Rsvd:    %5lu\n"
2020			"HugePages_Surp:    %5lu\n"
2021			"Hugepagesize:   %8lu kB\n",
2022			h->nr_huge_pages,
2023			h->free_huge_pages,
2024			h->resv_huge_pages,
2025			h->surplus_huge_pages,
2026			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2027}
2028
2029int hugetlb_report_node_meminfo(int nid, char *buf)
2030{
2031	struct hstate *h = &default_hstate;
2032	return sprintf(buf,
2033		"Node %d HugePages_Total: %5u\n"
2034		"Node %d HugePages_Free:  %5u\n"
2035		"Node %d HugePages_Surp:  %5u\n",
2036		nid, h->nr_huge_pages_node[nid],
2037		nid, h->free_huge_pages_node[nid],
2038		nid, h->surplus_huge_pages_node[nid]);
2039}
2040
2041/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2042unsigned long hugetlb_total_pages(void)
2043{
2044	struct hstate *h = &default_hstate;
2045	return h->nr_huge_pages * pages_per_huge_page(h);
2046}
2047
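/*
 * Adjust the huge page reservation accounting by @delta pages. A positive
 * delta may grow the pool with surplus pages; a negative delta returns
 * unused surplus pages.
 */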
2048static int hugetlb_acct_memory(struct hstate *h, long delta)
2049{
2050	int ret = -ENOMEM;
2051
2052	spin_lock(&hugetlb_lock);
2053	/*
2054	 * When cpusets are configured, strict hugetlb page reservation breaks
2055	 * down because the accounting is done on a global variable. Such a
2056	 * reservation is essentially meaningless in the presence of cpusets,
2057	 * since it is never checked against the page availability of the
2058	 * current cpuset. An application can therefore still be OOM-killed by
2059	 * the kernel if its cpuset runs out of free hugetlb pages.
2060	 * Enforcing strict accounting per cpuset is nearly impossible (or too
2061	 * ugly) because cpusets are fluid: tasks and memory nodes can be
2062	 * moved between cpusets at any time.
2063	 *
2064	 * Changing the semantics of shared hugetlb mappings under cpusets is
2065	 * undesirable. However, to preserve some of the original semantics,
2066	 * we fall back to checking against the current free page count as a
2067	 * best effort, hopefully minimizing the impact of the semantic change
2068	 * that cpusets introduce.
2069	 */
2070	if (delta > 0) {
2071		if (gather_surplus_pages(h, delta) < 0)
2072			goto out;
2073
2074		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2075			return_unused_surplus_pages(h, delta);
2076			goto out;
2077		}
2078	}
2079
2080	ret = 0;
2081	if (delta < 0)
2082		return_unused_surplus_pages(h, (unsigned long) -delta);
2083
2084out:
2085	spin_unlock(&hugetlb_lock);
2086	return ret;
2087}
2088
2089static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2090{
2091	struct resv_map *reservations = vma_resv_map(vma);
2092
2093	/*
2094	 * This new VMA should share its sibling's reservation map if present.
2095	 * The VMA will only ever have a valid reservation map pointer where
2096	 * it is being copied for another still existing VMA.  As that VMA
2097	 * has a reference to the reservation map it cannot disappear until
2098	 * after this open call completes.  It is therefore safe to take a
2099	 * new reference here without additional locking.
2100	 */
2101	if (reservations)
2102		kref_get(&reservations->refs);
2103}
2104
2105static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2106{
2107	struct hstate *h = hstate_vma(vma);
2108	struct resv_map *reservations = vma_resv_map(vma);
2109	unsigned long reserve;
2110	unsigned long start;
2111	unsigned long end;
2112
2113	if (reservations) {
2114		start = vma_hugecache_offset(h, vma, vma->vm_start);
2115		end = vma_hugecache_offset(h, vma, vma->vm_end);
2116
2117		reserve = (end - start) -
2118			region_count(&reservations->regions, start, end);
2119
2120		kref_put(&reservations->refs, resv_map_release);
2121
2122		if (reserve) {
2123			hugetlb_acct_memory(h, -reserve);
2124			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
2125		}
2126	}
2127}
2128
2129/*
2130 * We cannot handle pagefaults against hugetlb pages at all.  They cause
2131 * handle_mm_fault() to try to instantiate regular-sized pages in the
2132 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2133 * this far.
2134 */
2135static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2136{
2137	BUG();
2138	return 0;
2139}
2140
2141const struct vm_operations_struct hugetlb_vm_ops = {
2142	.fault = hugetlb_vm_op_fault,
2143	.open = hugetlb_vm_op_open,
2144	.close = hugetlb_vm_op_close,
2145};
2146
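/*
 * Construct a huge PTE for @page using the VMA's protection bits; the
 * entry is made writable and dirty when @writable is set, otherwise it is
 * write-protected.
 */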
2147static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2148				int writable)
2149{
2150	pte_t entry;
2151
2152	if (writable) {
2153		entry =
2154		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2155	} else {
2156		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2157	}
2158	entry = pte_mkyoung(entry);
2159	entry = pte_mkhuge(entry);
2160
2161	return entry;
2162}
2163
2164static void set_huge_ptep_writable(struct vm_area_struct *vma,
2165				   unsigned long address, pte_t *ptep)
2166{
2167	pte_t entry;
2168
2169	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2170	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
2171		update_mmu_cache(vma, address, ptep);
2172	}
2173}
2174
2175
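/*
 * Copy the huge page table entries of @vma from @src to @dst at fork time,
 * write-protecting the source entries for private, writable (COW) mappings.
 */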
2176int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2177			    struct vm_area_struct *vma)
2178{
2179	pte_t *src_pte, *dst_pte, entry;
2180	struct page *ptepage;
2181	unsigned long addr;
2182	int cow;
2183	struct hstate *h = hstate_vma(vma);
2184	unsigned long sz = huge_page_size(h);
2185
2186	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2187
2188	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2189		src_pte = huge_pte_offset(src, addr);
2190		if (!src_pte)
2191			continue;
2192		dst_pte = huge_pte_alloc(dst, addr, sz);
2193		if (!dst_pte)
2194			goto nomem;
2195
2196		/* If the pagetables are shared don't copy or take references */
2197		if (dst_pte == src_pte)
2198			continue;
2199
2200		spin_lock(&dst->page_table_lock);
2201		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2202		if (!huge_pte_none(huge_ptep_get(src_pte))) {
2203			if (cow)
2204				huge_ptep_set_wrprotect(src, addr, src_pte);
2205			entry = huge_ptep_get(src_pte);
2206			ptepage = pte_page(entry);
2207			get_page(ptepage);
2208			page_dup_rmap(ptepage);
2209			set_huge_pte_at(dst, addr, dst_pte, entry);
2210		}
2211		spin_unlock(&src->page_table_lock);
2212		spin_unlock(&dst->page_table_lock);
2213	}
2214	return 0;
2215
2216nomem:
2217	return -ENOMEM;
2218}
2219
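/* Return 1 if @pte is a non-present huge PTE encoding a migration entry. */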
2220static int is_hugetlb_entry_migration(pte_t pte)
2221{
2222	swp_entry_t swp;
2223
2224	if (huge_pte_none(pte) || pte_present(pte))
2225		return 0;
2226	swp = pte_to_swp_entry(pte);
2227	if (non_swap_entry(swp) && is_migration_entry(swp)) {
2228		return 1;
2229	} else
2230		return 0;
2231}
2232
2233static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2234{
2235	swp_entry_t swp;
2236
2237	if (huge_pte_none(pte) || pte_present(pte))
2238		return 0;
2239	swp = pte_to_swp_entry(pte);
2240	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
2241		return 1;
2242	} else
2243		return 0;
2244}
2245
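/*
 * Unmap and free the huge pages mapped by @vma in [@start, @end). When
 * @ref_page is supplied, only that specific page is unmapped. The caller
 * must hold the file's i_mmap_lock.
 */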
2246void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2247			    unsigned long end, struct page *ref_page)
2248{
2249	struct mm_struct *mm = vma->vm_mm;
2250	unsigned long address;
2251	pte_t *ptep;
2252	pte_t pte;
2253	struct page *page;
2254	struct page *tmp;
2255	struct hstate *h = hstate_vma(vma);
2256	unsigned long sz = huge_page_size(h);
2257
2258	/*
2259	 * A page gathering list, protected by the per-file i_mmap_lock. The
2260	 * lock is used to avoid list corruption from multiple unmapping
2261	 * of the same page since we are using page->lru.
2262	 */
2263	LIST_HEAD(page_list);
2264
2265	WARN_ON(!is_vm_hugetlb_page(vma));
2266	BUG_ON(start & ~huge_page_mask(h));
2267	BUG_ON(end & ~huge_page_mask(h));
2268
2269	mmu_notifier_invalidate_range_start(mm, start, end);
2270	spin_lock(&mm->page_table_lock);
2271	for (address = start; address < end; address += sz) {
2272		ptep = huge_pte_offset(mm, address);
2273		if (!ptep)
2274			continue;
2275
2276		if (huge_pmd_unshare(mm, &address, ptep))
2277			continue;
2278
2279		/*
2280		 * If a reference page is supplied, it is because a specific
2281		 * page is being unmapped, not a range. Ensure the page we
2282		 * are about to unmap is the actual page of interest.
2283		 */
2284		if (ref_page) {
2285			pte = huge_ptep_get(ptep);
2286			if (huge_pte_none(pte))
2287				continue;
2288			page = pte_page(pte);
2289			if (page != ref_page)
2290				continue;
2291
2292			/*
2293			 * Mark the VMA as having unmapped its page so that
2294			 * future faults in this VMA will fail rather than
2295			 * looking like data was lost
2296			 */
2297			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2298		}
2299
2300		pte = huge_ptep_get_and_clear(mm, address, ptep);
2301		if (huge_pte_none(pte))
2302			continue;
2303
2304		/*
2305		 * HWPoisoned hugepage is already unmapped and dropped reference
2306		 */
2307		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2308			continue;
2309
2310		page = pte_page(pte);
2311		if (pte_dirty(pte))
2312			set_page_dirty(page);
2313		list_add(&page->lru, &page_list);
2314	}
2315	spin_unlock(&mm->page_table_lock);
2316	flush_tlb_range(vma, start, end);
2317	mmu_notifier_invalidate_range_end(mm, start, end);
2318	list_for_each_entry_safe(page, tmp, &page_list, lru) {
2319		page_remove_rmap(page);
2320		list_del(&page->lru);
2321		put_page(page);
2322	}
2323}
2324
2325void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2326			  unsigned long end, struct page *ref_page)
2327{
2328	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2329	__unmap_hugepage_range(vma, start, end, ref_page);
2330	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2331}
2332
2333/*
2334 * This is called when the original mapper fails to COW a MAP_PRIVATE
2335 * mapping it owns the reserve page for. The intention is to unmap the page
2336 * from other VMAs and let the children be SIGKILLed if they are faulting the
2337 * same region.
2338 */
2339static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2340				struct page *page, unsigned long address)
2341{
2342	struct hstate *h = hstate_vma(vma);
2343	struct vm_area_struct *iter_vma;
2344	struct address_space *mapping;
2345	struct prio_tree_iter iter;
2346	pgoff_t pgoff;
2347
2348	/*
2349	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2350	 * from page cache lookup which is in HPAGE_SIZE units.
2351	 */
2352	address = address & huge_page_mask(h);
2353	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2354		+ (vma->vm_pgoff >> PAGE_SHIFT);
2355	mapping = (struct address_space *)page_private(page);
2356
2357	/*
2358	 * Take the mapping lock for the duration of the table walk. As
2359	 * this mapping is shared between all the VMAs,
2360	 * __unmap_hugepage_range() is called directly since the lock is already held.
2361	 */
2362	spin_lock(&mapping->i_mmap_lock);
2363	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2364		/* Do not unmap the current VMA */
2365		if (iter_vma == vma)
2366			continue;
2367
2368		/*
2369		 * Unmap the page from other VMAs without their own reserves.
2370		 * They get marked to be SIGKILLed if they fault in these
2371		 * areas. This is because a future no-page fault on this VMA
2372		 * could insert a zeroed page instead of the data existing
2373		 * from the time of fork. This would look like data corruption.
2374		 */
2375		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2376			__unmap_hugepage_range(iter_vma,
2377				address, address + huge_page_size(h),
2378				page);
2379	}
2380	spin_unlock(&mapping->i_mmap_lock);
2381
2382	return 1;
2383}
2384
2385/*
2386 * Hugetlb_cow() should be called with page lock of the original hugepage held.
2387 */
2388static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2389			unsigned long address, pte_t *ptep, pte_t pte,
2390			struct page *pagecache_page)
2391{
2392	struct hstate *h = hstate_vma(vma);
2393	struct page *old_page, *new_page;
2394	int avoidcopy;
2395	int outside_reserve = 0;
2396
2397	old_page = pte_page(pte);
2398
2399retry_avoidcopy:
2400	/* If no-one else is actually using this page, avoid the copy
2401	 * and just make the page writable */
2402	avoidcopy = (page_mapcount(old_page) == 1);
2403	if (avoidcopy) {
2404		if (PageAnon(old_page))
2405			page_move_anon_rmap(old_page, vma, address);
2406		set_huge_ptep_writable(vma, address, ptep);
2407		return 0;
2408	}
2409
2410	/*
2411	 * If the process that created a MAP_PRIVATE mapping is about to
2412	 * perform a COW due to a shared page count, attempt to satisfy
2413	 * the allocation without using the existing reserves. The pagecache
2414	 * page is used to determine if the reserve at this address was
2415	 * consumed or not. If reserves were used, a partial faulted mapping
2416	 * at the time of fork() could consume its reserves on COW instead
2417	 * of the full address range.
2418	 */
2419	if (!(vma->vm_flags & VM_MAYSHARE) &&
2420			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2421			old_page != pagecache_page)
2422		outside_reserve = 1;
2423
2424	page_cache_get(old_page);
2425
2426	/* Drop page_table_lock as buddy allocator may be called */
2427	spin_unlock(&mm->page_table_lock);
2428	new_page = alloc_huge_page(vma, address, outside_reserve);
2429
2430	if (IS_ERR(new_page)) {
2431		page_cache_release(old_page);
2432
2433		/*
2434		 * If a process owning a MAP_PRIVATE mapping fails to COW,
2435		 * it is due to references held by a child and an insufficient
2436		 * huge page pool. To guarantee the original mapper's
2437		 * reliability, unmap the page from child processes. The child
2438		 * may get SIGKILLed if it later faults.
2439		 */
2440		if (outside_reserve) {
2441			BUG_ON(huge_pte_none(pte));
2442			if (unmap_ref_private(mm, vma, old_page, address)) {
2443				BUG_ON(page_count(old_page) != 1);
2444				BUG_ON(huge_pte_none(pte));
2445				spin_lock(&mm->page_table_lock);
2446				goto retry_avoidcopy;
2447			}
2448			WARN_ON_ONCE(1);
2449		}
2450
2451		/* Caller expects lock to be held */
2452		spin_lock(&mm->page_table_lock);
2453		return -PTR_ERR(new_page);
2454	}
2455
2456	/*
2457	 * When the original hugepage is a shared one, it does not have
2458	 * anon_vma prepared.
2459	 */
2460	if (unlikely(anon_vma_prepare(vma)))
2461		return VM_FAULT_OOM;
2462
2463	copy_user_huge_page(new_page, old_page, address, vma);
2464	__SetPageUptodate(new_page);
2465
2466	/*
2467	 * Retake the page_table_lock to check for racing updates
2468	 * before the page tables are altered
2469	 */
2470	spin_lock(&mm->page_table_lock);
2471	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2472	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2473		/* Break COW */
2474		mmu_notifier_invalidate_range_start(mm,
2475			address & huge_page_mask(h),
2476			(address & huge_page_mask(h)) + huge_page_size(h));
2477		huge_ptep_clear_flush(vma, address, ptep);
2478		set_huge_pte_at(mm, address, ptep,
2479				make_huge_pte(vma, new_page, 1));
2480		page_remove_rmap(old_page);
2481		hugepage_add_new_anon_rmap(new_page, vma, address);
2482		/* Make the old page be freed below */
2483		new_page = old_page;
2484		mmu_notifier_invalidate_range_end(mm,
2485			address & huge_page_mask(h),
2486			(address & huge_page_mask(h)) + huge_page_size(h));
2487	}
2488	page_cache_release(new_page);
2489	page_cache_release(old_page);
2490	return 0;
2491}
2492
2493/* Return the pagecache page at a given address within a VMA */
2494static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2495			struct vm_area_struct *vma, unsigned long address)
2496{
2497	struct address_space *mapping;
2498	pgoff_t idx;
2499
2500	mapping = vma->vm_file->f_mapping;
2501	idx = vma_hugecache_offset(h, vma, address);
2502
2503	return find_lock_page(mapping, idx);
2504}
2505
2506/*
2507 * Return whether there is a pagecache page to back given address within VMA.
2508 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2509 */
2510static bool hugetlbfs_pagecache_present(struct hstate *h,
2511			struct vm_area_struct *vma, unsigned long address)
2512{
2513	struct address_space *mapping;
2514	pgoff_t idx;
2515	struct page *page;
2516
2517	mapping = vma->vm_file->f_mapping;
2518	idx = vma_hugecache_offset(h, vma, address);
2519
2520	page = find_get_page(mapping, idx);
2521	if (page)
2522		put_page(page);
2523	return page != NULL;
2524}
2525
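/*
 * Handle a hugetlb fault for which no PTE exists yet: find or allocate the
 * backing huge page, add it to the page cache or anon rmap, and install a
 * new huge PTE.
 */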
2526static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2527			unsigned long address, pte_t *ptep, unsigned int flags)
2528{
2529	struct hstate *h = hstate_vma(vma);
2530	int ret = VM_FAULT_SIGBUS;
2531	pgoff_t idx;
2532	unsigned long size;
2533	struct page *page;
2534	struct address_space *mapping;
2535	pte_t new_pte;
2536
2537	/*
2538	 * Currently, we are forced to kill the process in the event the
2539	 * original mapper has unmapped pages from the child due to a failed
2540	 * COW. Warn that such a situation has occurred as it may not be obvious.
2541	 */
2542	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2543		printk(KERN_WARNING
2544			"PID %d killed due to inadequate hugepage pool\n",
2545			current->pid);
2546		return ret;
2547	}
2548
2549	mapping = vma->vm_file->f_mapping;
2550	idx = vma_hugecache_offset(h, vma, address);
2551
2552	/*
2553	 * Use page lock to guard against racing truncation
2554	 * before we get page_table_lock.
2555	 */
2556retry:
2557	page = find_lock_page(mapping, idx);
2558	if (!page) {
2559		size = i_size_read(mapping->host) >> huge_page_shift(h);
2560		if (idx >= size)
2561			goto out;
2562		page = alloc_huge_page(vma, address, 0);
2563		if (IS_ERR(page)) {
2564			ret = -PTR_ERR(page);
2565			goto out;
2566		}
2567		clear_huge_page(page, address, huge_page_size(h));
2568		__SetPageUptodate(page);
2569
2570		if (vma->vm_flags & VM_MAYSHARE) {
2571			int err;
2572			struct inode *inode = mapping->host;
2573
2574			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2575			if (err) {
2576				put_page(page);
2577				if (err == -EEXIST)
2578					goto retry;
2579				goto out;
2580			}
2581
2582			spin_lock(&inode->i_lock);
2583			inode->i_blocks += blocks_per_huge_page(h);
2584			spin_unlock(&inode->i_lock);
2585			page_dup_rmap(page);
2586		} else {
2587			lock_page(page);
2588			if (unlikely(anon_vma_prepare(vma))) {
2589				ret = VM_FAULT_OOM;
2590				goto backout_unlocked;
2591			}
2592			hugepage_add_new_anon_rmap(page, vma, address);
2593		}
2594	} else {
2595		/*
2596		 * If a memory error occurs between mmap() and fault, some processes
2597		 * don't have a hwpoisoned swap entry for the errored virtual address.
2598		 * So we need to block hugepage faults with a PG_hwpoison bit check.
2599		 */
2600		if (unlikely(PageHWPoison(page))) {
2601			ret = VM_FAULT_HWPOISON;
2602			goto backout_unlocked;
2603		}
2604		page_dup_rmap(page);
2605	}
2606
2607	/*
2608	 * If we are going to COW a private mapping later, we examine the
2609	 * pending reservations for this page now. This will ensure that
2610	 * any allocations necessary to record that reservation occur outside
2611	 * the spinlock.
2612	 */
2613	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2614		if (vma_needs_reservation(h, vma, address) < 0) {
2615			ret = VM_FAULT_OOM;
2616			goto backout_unlocked;
2617		}
2618
2619	spin_lock(&mm->page_table_lock);
2620	size = i_size_read(mapping->host) >> huge_page_shift(h);
2621	if (idx >= size)
2622		goto backout;
2623
2624	ret = 0;
2625	if (!huge_pte_none(huge_ptep_get(ptep)))
2626		goto backout;
2627
2628	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2629				&& (vma->vm_flags & VM_SHARED)));
2630	set_huge_pte_at(mm, address, ptep, new_pte);
2631
2632	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2633		/* Optimization, do the COW without a second fault */
2634		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2635	}
2636
2637	spin_unlock(&mm->page_table_lock);
2638	unlock_page(page);
2639out:
2640	return ret;
2641
2642backout:
2643	spin_unlock(&mm->page_table_lock);
2644backout_unlocked:
2645	unlock_page(page);
2646	put_page(page);
2647	goto out;
2648}
2649
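/*
 * Top-level hugetlb fault handler: resolves migration and hwpoison entries,
 * dispatches missing-page faults to hugetlb_no_page(), and performs COW for
 * write faults on read-only entries.
 */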
2650int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2651			unsigned long address, unsigned int flags)
2652{
2653	pte_t *ptep;
2654	pte_t entry;
2655	int ret;
2656	struct page *page = NULL;
2657	struct page *pagecache_page = NULL;
2658	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2659	struct hstate *h = hstate_vma(vma);
2660
2661	ptep = huge_pte_offset(mm, address);
2662	if (ptep) {
2663		entry = huge_ptep_get(ptep);
2664		if (unlikely(is_hugetlb_entry_migration(entry))) {
2665			migration_entry_wait(mm, (pmd_t *)ptep, address);
2666			return 0;
2667		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2668			return VM_FAULT_HWPOISON;
2669	}
2670
2671	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2672	if (!ptep)
2673		return VM_FAULT_OOM;
2674
2675	/*
2676	 * Serialize hugepage allocation and instantiation, so that we don't
2677	 * get spurious allocation failures if two CPUs race to instantiate
2678	 * the same page in the page cache.
2679	 */
2680	mutex_lock(&hugetlb_instantiation_mutex);
2681	entry = huge_ptep_get(ptep);
2682	if (huge_pte_none(entry)) {
2683		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2684		goto out_mutex;
2685	}
2686
2687	ret = 0;
2688
2689	/*
2690	 * If we are going to COW the mapping later, we examine the pending
2691	 * reservations for this page now. This will ensure that any
2692	 * allocations necessary to record that reservation occur outside the
2693	 * spinlock. For private mappings, we also lookup the pagecache
2694	 * page now as it is used to determine if a reservation has been
2695	 * consumed.
2696	 */
2697	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2698		if (vma_needs_reservation(h, vma, address) < 0) {
2699			ret = VM_FAULT_OOM;
2700			goto out_mutex;
2701		}
2702
2703		if (!(vma->vm_flags & VM_MAYSHARE))
2704			pagecache_page = hugetlbfs_pagecache_page(h,
2705								vma, address);
2706	}
2707
2708	/*
2709	 * hugetlb_cow() requires page locks of pte_page(entry) and
2710	 * pagecache_page, so here we need to take the former one
2711	 * when page != pagecache_page or !pagecache_page.
2712	 * Note that locking order is always pagecache_page -> page,
2713	 * so no worry about deadlock.
2714	 */
2715	page = pte_page(entry);
2716	if (page != pagecache_page)
2717		lock_page(page);
2718
2719	spin_lock(&mm->page_table_lock);
2720	/* Check for a racing update before calling hugetlb_cow */
2721	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2722		goto out_page_table_lock;
2723
2724
2725	if (flags & FAULT_FLAG_WRITE) {
2726		if (!pte_write(entry)) {
2727			ret = hugetlb_cow(mm, vma, address, ptep, entry,
2728							pagecache_page);
2729			goto out_page_table_lock;
2730		}
2731		entry = pte_mkdirty(entry);
2732	}
2733	entry = pte_mkyoung(entry);
2734	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2735						flags & FAULT_FLAG_WRITE))
2736		update_mmu_cache(vma, address, ptep);
2737
2738out_page_table_lock:
2739	spin_unlock(&mm->page_table_lock);
2740
2741	if (pagecache_page) {
2742		unlock_page(pagecache_page);
2743		put_page(pagecache_page);
2744	}
2745	unlock_page(page);
2746
2747out_mutex:
2748	mutex_unlock(&hugetlb_instantiation_mutex);
2749
2750	return ret;
2751}
2752
2753/* Can be overridden by architectures */
2754__attribute__((weak)) struct page *
2755follow_huge_pud(struct mm_struct *mm, unsigned long address,
2756	       pud_t *pud, int write)
2757{
2758	BUG();
2759	return NULL;
2760}
2761
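/*
 * Hugetlb counterpart of get_user_pages(): walk the huge PTEs starting at
 * *position, faulting pages in as needed, and record the constituent small
 * pages in @pages (and @vmas); returns the updated page count or -EFAULT.
 */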
2762int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2763			struct page **pages, struct vm_area_struct **vmas,
2764			unsigned long *position, int *length, int i,
2765			unsigned int flags)
2766{
2767	unsigned long pfn_offset;
2768	unsigned long vaddr = *position;
2769	int remainder = *length;
2770	struct hstate *h = hstate_vma(vma);
2771
2772	spin_lock(&mm->page_table_lock);
2773	while (vaddr < vma->vm_end && remainder) {
2774		pte_t *pte;
2775		int absent;
2776		struct page *page;
2777
2778		/*
2779		 * Some archs (sparc64, sh*) have multiple pte_ts for
2780		 * each hugepage.  We have to make sure we get the
2781		 * first, for the page indexing below to work.
2782		 */
2783		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2784		absent = !pte || huge_pte_none(huge_ptep_get(pte));
2785
2786		/*
2787		 * When coredumping, it suits get_dump_page if we just return
2788		 * an error where there's an empty slot with no huge pagecache
2789		 * to back it.  This way, we avoid allocating a hugepage, and
2790		 * the sparse dumpfile avoids allocating disk blocks, but its
2791		 * huge holes still show up with zeroes where they need to be.
2792		 */
2793		if (absent && (flags & FOLL_DUMP) &&
2794		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2795			remainder = 0;
2796			break;
2797		}
2798
2799		if (absent ||
2800		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2801			int ret;
2802
2803			spin_unlock(&mm->page_table_lock);
2804			ret = hugetlb_fault(mm, vma, vaddr,
2805				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2806			spin_lock(&mm->page_table_lock);
2807			if (!(ret & VM_FAULT_ERROR))
2808				continue;
2809
2810			remainder = 0;
2811			break;
2812		}
2813
2814		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2815		page = pte_page(huge_ptep_get(pte));
2816same_page:
2817		if (pages) {
2818			pages[i] = mem_map_offset(page, pfn_offset);
2819			get_page(pages[i]);
2820		}
2821
2822		if (vmas)
2823			vmas[i] = vma;
2824
2825		vaddr += PAGE_SIZE;
2826		++pfn_offset;
2827		--remainder;
2828		++i;
2829		if (vaddr < vma->vm_end && remainder &&
2830				pfn_offset < pages_per_huge_page(h)) {
2831			/*
2832			 * We use pfn_offset to avoid touching the pageframes
2833			 * of this compound page.
2834			 */
2835			goto same_page;
2836		}
2837	}
2838	spin_unlock(&mm->page_table_lock);
2839	*length = remainder;
2840	*position = vaddr;
2841
2842	return i ? i : -EFAULT;
2843}
2844
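/*
 * Apply @newprot to all huge PTEs of @vma in [@address, @end), flushing the
 * TLB for the affected range afterwards.
 */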
2845void hugetlb_change_protection(struct vm_area_struct *vma,
2846		unsigned long address, unsigned long end, pgprot_t newprot)
2847{
2848	struct mm_struct *mm = vma->vm_mm;
2849	unsigned long start = address;
2850	pte_t *ptep;
2851	pte_t pte;
2852	struct hstate *h = hstate_vma(vma);
2853
2854	BUG_ON(address >= end);
2855	flush_cache_range(vma, address, end);
2856
2857	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2858	spin_lock(&mm->page_table_lock);
2859	for (; address < end; address += huge_page_size(h)) {
2860		ptep = huge_pte_offset(mm, address);
2861		if (!ptep)
2862			continue;
2863		if (huge_pmd_unshare(mm, &address, ptep))
2864			continue;
2865		if (!huge_pte_none(huge_ptep_get(ptep))) {
2866			pte = huge_ptep_get_and_clear(mm, address, ptep);
2867			pte = pte_mkhuge(pte_modify(pte, newprot));
2868			set_huge_pte_at(mm, address, ptep, pte);
2869		}
2870	}
2871	spin_unlock(&mm->page_table_lock);
2872	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2873
2874	flush_tlb_range(vma, start, end);
2875}
2876
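/*
 * Reserve huge pages and filesystem quota for the range [@from, @to) (in
 * huge pages) of @inode's mapping, unless VM_NORESERVE is set in @acctflag.
 */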
2877int hugetlb_reserve_pages(struct inode *inode,
2878					long from, long to,
2879					struct vm_area_struct *vma,
2880					int acctflag)
2881{
2882	long ret, chg;
2883	struct hstate *h = hstate_inode(inode);
2884
2885	/*
2886	 * Only apply hugepage reservation if asked. At fault time, a
2887	 * VM_NORESERVE mapping will attempt to allocate the page and
2888	 * filesystem quota without using reserves.
2889	 */
2890	if (acctflag & VM_NORESERVE)
2891		return 0;
2892
2893	/*
2894	 * Shared mappings base their reservation on the number of pages that
2895	 * are already allocated on behalf of the file. Private mappings need
2896	 * to reserve the full area even if read-only as mprotect() may be
2897	 * called to make the mapping read-write. Assume !vma is a shm mapping.
2898	 */
2899	if (!vma || vma->vm_flags & VM_MAYSHARE)
2900		chg = region_chg(&inode->i_mapping->private_list, from, to);
2901	else {
2902		struct resv_map *resv_map = resv_map_alloc();
2903		if (!resv_map)
2904			return -ENOMEM;
2905
2906		chg = to - from;
2907
2908		set_vma_resv_map(vma, resv_map);
2909		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2910	}
2911
2912	if (chg < 0)
2913		return chg;
2914
2915	/* There must be enough filesystem quota for the mapping */
2916	if (hugetlb_get_quota(inode->i_mapping, chg))
2917		return -ENOSPC;
2918
2919	/*
2920	 * Check that enough hugepages are available for the reservation.
2921	 * Hand back the quota if there are not.
2922	 */
2923	ret = hugetlb_acct_memory(h, chg);
2924	if (ret < 0) {
2925		hugetlb_put_quota(inode->i_mapping, chg);
2926		return ret;
2927	}
2928
2929	/*
2930	 * Account for the reservations made. Shared mappings record regions
2931	 * that have reservations as they are shared by multiple VMAs.
2932	 * When the last VMA disappears, the region map says how much
2933	 * the reservation was and the page cache tells how much of
2934	 * the reservation was consumed. Private mappings are per-VMA and
2935	 * only the consumed reservations are tracked. When the VMA
2936	 * disappears, the original reservation is the VMA size and the
2937	 * consumed reservations are stored in the map. Hence, nothing
2938	 * else has to be done for private mappings here
2939	 */
2940	if (!vma || vma->vm_flags & VM_MAYSHARE)
2941		region_add(&inode->i_mapping->private_list, from, to);
2942	return 0;
2943}
2944
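/*
 * Release the reservations and quota held beyond @offset when a hugetlbfs
 * inode is truncated; @freed is the number of pages actually freed.
 */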
2945void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2946{
2947	struct hstate *h = hstate_inode(inode);
2948	long chg = region_truncate(&inode->i_mapping->private_list, offset);
2949
2950	spin_lock(&inode->i_lock);
2951	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2952	spin_unlock(&inode->i_lock);
2953
2954	hugetlb_put_quota(inode->i_mapping, (chg - freed));
2955	hugetlb_acct_memory(h, -(chg - freed));
2956}
2957
2958/* Should be called in hugetlb_lock */
2959static int is_hugepage_on_freelist(struct page *hpage)
2960{
2961	struct page *page;
2962	struct page *tmp;
2963	struct hstate *h = page_hstate(hpage);
2964	int nid = page_to_nid(hpage);
2965
2966	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
2967		if (page == hpage)
2968			return 1;
2969	return 0;
2970}
2971
2972#ifdef CONFIG_MEMORY_FAILURE
2973/*
2974 * This function is called from memory failure code.
2975 * Assume the caller holds page lock of the head page.
2976 */
2977int dequeue_hwpoisoned_huge_page(struct page *hpage)
2978{
2979	struct hstate *h = page_hstate(hpage);
2980	int nid = page_to_nid(hpage);
2981	int ret = -EBUSY;
2982
2983	spin_lock(&hugetlb_lock);
2984	if (is_hugepage_on_freelist(hpage)) {
2985		list_del(&hpage->lru);
2986		h->free_huge_pages--;
2987		h->free_huge_pages_node[nid]--;
2988		ret = 0;
2989	}
2990	spin_unlock(&hugetlb_lock);
2991	return ret;
2992}
2993#endif
2994