hugetlb.c revision b0320c7b7d1ac1bd5c2d9dff3258524ab39bad32
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation mutex:
 *
 * 	down_write(&mm->mmap_sem);
 * or
 * 	down_read(&mm->mmap_sem);
 * 	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher than ours, extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}
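
/*
 * Worked example (illustrative, not part of this file): starting from an
 * empty list, reserving huge-page offsets [0,2) and then [1,3) leaves a
 * single merged region:
 *
 *	LIST_HEAD(regions);
 *
 *	region_chg(&regions, 0, 2);	// returns 2, plants placeholder [0,0)
 *	region_add(&regions, 0, 2);	// list is now [0,2)
 *	region_chg(&regions, 1, 3);	// returns 1: only [2,3) is new
 *	region_add(&regions, 1, 3);	// list is now the single region [0,3)
 *
 * region_count(&regions, 0, 3) would then return 3, and
 * region_truncate(&regions, 0) would release everything.
 */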

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
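
/*
 * Example (illustrative numbers): with 2MB huge pages
 * (huge_page_shift() == 21) and a VMA with vm_start == 0x40000000 and
 * vm_pgoff == 0, an address of 0x40400000 yields
 * (0x400000 >> 21) + 0 == 2, i.e. the third huge page in the file.
 */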

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
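
/*
 * Illustrative sketch: because the resv_map below is kmalloc()'d, its
 * address is at least word aligned and the low two bits of the pointer
 * are free to carry these flags:
 *
 *	unsigned long v = (unsigned long)map | HPAGE_RESV_OWNER;
 *
 *	(struct resv_map *)(v & ~HPAGE_RESV_MASK)	// recovers the pointer
 *	(v & HPAGE_RESV_OWNER) != 0			// tests the flag
 *
 * which is exactly what vma_resv_map() and is_vma_resv_set() below do.
 */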

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void copy_gigantic_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_del(&page->lru);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	/*
	 * Child processes with MAP_PRIVATE mappings created by their parent
	 * have no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (!avoid_reserve)
					decrement_hugepage_resv_vma(h, vma);
				break;
			}
		}
	}
err:
	mpol_cond_put(mpol);
	put_mems_allowed();
	return page;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		p->first_page = page;
	}
}

int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	next_nid = start_nid;

	do {
		page = alloc_fresh_huge_page_node(h, next_nid);
		if (page) {
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	} while (next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}
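
/*
 * Example (illustrative): with nodes_allowed == {0,1,2} and
 * h->next_nid_to_alloc == 1, a pool grow attempt tries nodes 1, 2, 0 in
 * turn and gives up only when the walk returns to the starting node.
 * Because every call to hstate_next_node_to_alloc() also advances the
 * saved node, successive calls interleave allocations across the
 * allowed nodes rather than filling one node first.
 */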

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
		    !list_empty(&h->hugepage_freelists[next_nid])) {
			struct page *page =
				list_entry(h->hugepage_freelists[next_nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[next_nid]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[next_nid]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_free(h, nodes_allowed);
	} while (next_nid != start_nid);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit.
	 *
	 * This, however, introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		return NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page)
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			goto free;

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	spin_unlock(&hugetlb_lock);
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		enqueue_huge_page(h, page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
free:
	if (!list_empty(&surplus_list)) {
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			put_page(page);
		}
	}
	spin_lock(&hugetlb_lock);

	return ret;
}
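
/*
 * Worked example (illustrative numbers): with resv_huge_pages == 10,
 * free_huge_pages == 12 and delta == 5, needed is (10 + 5) - 12 == 3,
 * so three surplus pages are requested from the buddy allocator.  If a
 * racing thread consumed free pages while the lock was dropped, the
 * recalculation after retaking hugetlb_lock retries for the shortfall.
 */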

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
			break;
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * the reservation and actually increase the quota before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page
 * has been quota'd, allocated and instantiated, the change should be
 * committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else  {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}
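
/*
 * The two halves above pair up around an allocation: a caller such as
 * alloc_huge_page() below first uses vma_needs_reservation() to learn
 * how much quota to charge, performs the allocation, and only then
 * calls vma_commit_reservation() to record the offset in the region
 * map.  Nothing needs undoing on failure because region_chg() leaves
 * at most a zero-sized placeholder region behind.
 */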

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;

	/*
	 * Processes that did not create the mapping will have no reserves
	 * and will not have accounted against quota. Check that the quota
	 * can be made before satisfying the allocation.  MAP_NORESERVE
	 * mappings may also need pages and quota allocated if no reserve
	 * mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-VM_FAULT_OOM);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_SIGBUS);
		}
	}

	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(hstate_next_node_to_alloc(h,
						&node_states[N_HIGH_MEMORY])),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side-effects, like CommitLimit going negative.
		 */
		if (h->order > (MAX_ORDER - 1))
			totalram_pages += 1 << h->order;
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_HIGH_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int start_nid, next_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0)
		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	else
		start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		int nid = next_nid;
		if (delta < 0)  {
			/*
			 * To shrink on this node, there must be a surplus page
			 */
			if (!h->surplus_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_alloc(h,
								nodes_allowed);
				continue;
			}
		}
		if (delta > 0) {
			/*
			 * Surplus cannot exceed the total number of pages
			 */
			if (h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_free(h,
								nodes_allowed);
				continue;
			}
		}

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (next_nid != start_nid);

	return ret;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}
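
/*
 * Userspace sketch (illustrative, assumes x86 2MB huge pages so the
 * hstate directory is named "hugepages-2048kB" per the format in
 * hugetlb_add_hstate()): the sysfs attribute built on top of
 * set_max_huge_pages() can be driven like this:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/mm/hugepages/"
 *				"hugepages-2048kB/nr_hugepages", "w");
 *		if (!f)
 *			return 1;
 *		fprintf(f, "64\n");		// grow the pool to 64 pages
 *		return fclose(f) ? 1 : 0;
 *	}
 */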

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
			struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t len)
{
	int err;
	int nid;
	unsigned long count;
	struct hstate *h;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	err = strict_strtoul(buf, 10, &count);
	if (err)
		goto out;

	h = kobj_to_hstate(kobj, &nid);
	if (h->order >= MAX_ORDER) {
		err = -EINVAL;
		goto out;
	}

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_HIGH_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust count to global,
		 * but restrict alloc/free to the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_HIGH_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
out:
	NODEMASK_FREE(nodes_allowed);
	return err;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (h->order >= MAX_ORDER)
		return -EINVAL;

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = h - hstates;

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}
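
/*
 * For each hstate this creates a directory named after h->name under the
 * given parent; for the global case below that is, e.g. (assuming 2MB
 * huge pages):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 *
 * plus nr_hugepages_mempolicy on CONFIG_NUMA kernels.
 */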

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
								h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node sysdevs in node_devices[] using a parallel array.  The array
 * index of a node sysdev or _hstate == node id.
 * This is here to avoid any static dependency of the node sysdev driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node sysdevs
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node sysdev.
 * No-op if no hstate attributes attached.
 */
void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->sysdev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h)
		if (nhs->hstate_kobjs[h - hstates]) {
			kobject_put(nhs->hstate_kobjs[h - hstates]);
			nhs->hstate_kobjs[h - hstates] = NULL;
		}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * hugetlb module exit:  unregister hstate attributes from node sysdevs
 * that have them.
 */
static void hugetlb_unregister_all_nodes(void)
{
	int nid;

	/*
	 * disable node sysdev registrations.
	 */
	register_hugetlbfs_with_node(NULL, NULL);

	/*
	 * remove hstate attributes from any nodes that have them.
	 */
	for (nid = 0; nid < nr_node_ids; nid++)
		hugetlb_unregister_node(&node_devices[nid]);
}

/*
 * Register hstate attributes for a single node sysdev.
 * No-op if attributes already registered.
 */
void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->sysdev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
					" for node %d\n",
						h->name, node->sysdev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * sysdevs of nodes that have memory.  All on-line nodes should have
 * registered their associated sysdev by this time.
 */
static void hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		struct node *node = &node_devices[nid];
		if (node->sysdev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node sysdev driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_unregister_all_nodes(void) { }

static void hugetlb_register_all_nodes(void) { }

#endif

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	hugetlb_unregister_all_nodes();

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[h - hstates]);
	}

	kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);

static int __init hugetlb_init(void)
{
	/* Some platforms decide whether they support huge pages at boot
	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
	 * there is no such support.
	 */
	if (HPAGE_SHIFT == 0)
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
	if (default_hstate_max_huge_pages)
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;

	hugetlb_init_hstates();

	gather_bootmem_prealloc();

	report_hugepages();

	hugetlb_sysfs_init();

	hugetlb_register_all_nodes();

	return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	/*
	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
	 * so this hugepages= parameter goes to the "default hstate".
	 */
	if (!max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		printk(KERN_WARNING "hugepages= specified twice without "
			"interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);
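
/*
 * Example boot command line (illustrative, x86 sizes): each hugepages=
 * applies to the hugepagesz= that precedes it (parsed_hstate), or to the
 * default hstate if none has been seen yet:
 *
 *	hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * The 1GB pages are >= MAX_ORDER and are therefore allocated immediately
 * above, while the bootmem allocator is still available.
 */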

static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}
1873
1874#ifdef CONFIG_SYSCTL
1875static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1876			 struct ctl_table *table, int write,
1877			 void __user *buffer, size_t *length, loff_t *ppos)
1878{
1879	struct hstate *h = &default_hstate;
1880	unsigned long tmp;
1881	int ret;
1882
1883	tmp = h->max_huge_pages;
1884
1885	if (write && h->order >= MAX_ORDER)
1886		return -EINVAL;
1887
1888	table->data = &tmp;
1889	table->maxlen = sizeof(unsigned long);
1890	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1891	if (ret)
1892		goto out;
1893
1894	if (write) {
1895		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1896						GFP_KERNEL | __GFP_NORETRY);
1897		if (!(obey_mempolicy &&
1898			       init_nodemask_of_mempolicy(nodes_allowed))) {
1899			NODEMASK_FREE(nodes_allowed);
1900			nodes_allowed = &node_states[N_HIGH_MEMORY];
1901		}
1902		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1903
1904		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1905			NODEMASK_FREE(nodes_allowed);
1906	}
1907out:
1908	return ret;
1909}
1910
1911int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1912			  void __user *buffer, size_t *length, loff_t *ppos)
1913{
1914
1915	return hugetlb_sysctl_handler_common(false, table, write,
1916							buffer, length, ppos);
1917}
1918
1919#ifdef CONFIG_NUMA
1920int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1921			  void __user *buffer, size_t *length, loff_t *ppos)
1922{
1923	return hugetlb_sysctl_handler_common(true, table, write,
1924							buffer, length, ppos);
1925}
1926#endif /* CONFIG_NUMA */
1927
1928int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1929			void __user *buffer,
1930			size_t *length, loff_t *ppos)
1931{
1932	proc_dointvec(table, write, buffer, length, ppos);
1933	if (hugepages_treat_as_movable)
1934		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1935	else
1936		htlb_alloc_mask = GFP_HIGHUSER;
1937	return 0;
1938}
1939
1940int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1941			void __user *buffer,
1942			size_t *length, loff_t *ppos)
1943{
1944	struct hstate *h = &default_hstate;
1945	unsigned long tmp;
1946	int ret;
1947
1948	tmp = h->nr_overcommit_huge_pages;
1949
1950	if (write && h->order >= MAX_ORDER)
1951		return -EINVAL;
1952
1953	table->data = &tmp;
1954	table->maxlen = sizeof(unsigned long);
1955	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1956	if (ret)
1957		goto out;
1958
1959	if (write) {
1960		spin_lock(&hugetlb_lock);
1961		h->nr_overcommit_huge_pages = tmp;
1962		spin_unlock(&hugetlb_lock);
1963	}
1964out:
1965	return ret;
1966}
1967
1968#endif /* CONFIG_SYSCTL */
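
/*
 * Userspace reaches the handlers above via procfs; illustrative usage
 * (values hypothetical):
 *
 *	echo 128 > /proc/sys/vm/nr_hugepages
 *	echo 64 > /proc/sys/vm/nr_overcommit_hugepages
 *
 * An nr_hugepages write funnels into set_max_huge_pages(); the
 * nr_hugepages_mempolicy variant additionally restricts the nodemask to
 * the writing task's mempolicy.
 */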
1969
1970void hugetlb_report_meminfo(struct seq_file *m)
1971{
1972	struct hstate *h = &default_hstate;
1973	seq_printf(m,
1974			"HugePages_Total:   %5lu\n"
1975			"HugePages_Free:    %5lu\n"
1976			"HugePages_Rsvd:    %5lu\n"
1977			"HugePages_Surp:    %5lu\n"
1978			"Hugepagesize:   %8lu kB\n",
1979			h->nr_huge_pages,
1980			h->free_huge_pages,
1981			h->resv_huge_pages,
1982			h->surplus_huge_pages,
1983			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1984}
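
/*
 * Sample of the /proc/meminfo block emitted above (values illustrative,
 * assuming 2MB huge pages):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       32
 *	HugePages_Rsvd:        8
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */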
1985
1986int hugetlb_report_node_meminfo(int nid, char *buf)
1987{
1988	struct hstate *h = &default_hstate;
1989	return sprintf(buf,
1990		"Node %d HugePages_Total: %5u\n"
1991		"Node %d HugePages_Free:  %5u\n"
1992		"Node %d HugePages_Surp:  %5u\n",
1993		nid, h->nr_huge_pages_node[nid],
1994		nid, h->free_huge_pages_node[nid],
1995		nid, h->surplus_huge_pages_node[nid]);
1996}
1997
1998/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1999unsigned long hugetlb_total_pages(void)
2000{
2001	struct hstate *h = &default_hstate;
2002	return h->nr_huge_pages * pages_per_huge_page(h);
2003}
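
/*
 * Worked example (hypothetical): 64 huge pages of 2MB with a 4KB
 * PAGE_SIZE give pages_per_huge_page() == 512, so this reports
 * 64 * 512 = 32768 base pages.
 */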
2004
2005static int hugetlb_acct_memory(struct hstate *h, long delta)
2006{
2007	int ret = -ENOMEM;
2008
2009	spin_lock(&hugetlb_lock);
2010	/*
2011	 * When cpusets are configured, they break the strict hugetlb page
2012	 * reservation as the accounting is done on a global variable. Such
2013	 * a reservation is effectively meaningless in the presence of cpusets
2014	 * because it is not checked against page availability for the
2015	 * current cpuset: the application can still be OOM-killed by the
2016	 * kernel for lack of free hugetlb pages in the cpuset the task is in.
2017	 * Enforcing strict accounting with cpusets is nearly impossible
2018	 * (or too ugly) because cpusets are so fluid that a task or
2019	 * memory node can be dynamically moved between cpusets.
2020	 *
2021	 * This change of semantics for shared hugetlb mappings with cpusets
2022	 * is undesirable. However, in order to preserve some of the
2023	 * semantics, we fall back to checking against the current free page
2024	 * availability as a best-effort attempt, hopefully minimizing the
2025	 * impact of the changed cpuset semantics.
2026	 */
2027	if (delta > 0) {
2028		if (gather_surplus_pages(h, delta) < 0)
2029			goto out;
2030
2031		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2032			return_unused_surplus_pages(h, delta);
2033			goto out;
2034		}
2035	}
2036
2037	ret = 0;
2038	if (delta < 0)
2039		return_unused_surplus_pages(h, (unsigned long) -delta);
2040
2041out:
2042	spin_unlock(&hugetlb_lock);
2043	return ret;
2044}
2045
2046static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2047{
2048	struct resv_map *reservations = vma_resv_map(vma);
2049
2050	/*
2051	 * This new VMA should share its sibling's reservation map if present.
2052	 * The VMA will only ever have a valid reservation map pointer where
2053	 * it is being copied for another still existing VMA.  As that VMA
2054	 * has a reference to the reservation map it cannot disappear until
2055	 * after this open call completes.  It is therefore safe to take a
2056	 * new reference here without additional locking.
2057	 */
2058	if (reservations)
2059		kref_get(&reservations->refs);
2060}
2061
2062static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2063{
2064	struct hstate *h = hstate_vma(vma);
2065	struct resv_map *reservations = vma_resv_map(vma);
2066	unsigned long reserve;
2067	unsigned long start;
2068	unsigned long end;
2069
2070	if (reservations) {
2071		start = vma_hugecache_offset(h, vma, vma->vm_start);
2072		end = vma_hugecache_offset(h, vma, vma->vm_end);
2073
2074		reserve = (end - start) -
2075			region_count(&reservations->regions, start, end);
2076
2077		kref_put(&reservations->refs, resv_map_release);
2078
2079		if (reserve) {
2080			hugetlb_acct_memory(h, -reserve);
2081			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
2082		}
2083	}
2084}
2085
2086/*
2087 * We cannot handle pagefaults against hugetlb pages at all.  They cause
2088 * handle_mm_fault() to try to instantiate regular-sized pages in the
2089 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2090 * this far.
2091 */
2092static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2093{
2094	BUG();
2095	return 0;
2096}
2097
2098const struct vm_operations_struct hugetlb_vm_ops = {
2099	.fault = hugetlb_vm_op_fault,
2100	.open = hugetlb_vm_op_open,
2101	.close = hugetlb_vm_op_close,
2102};
2103
2104static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2105				int writable)
2106{
2107	pte_t entry;
2108
2109	if (writable) {
2110		entry =
2111		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2112	} else {
2113		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2114	}
2115	entry = pte_mkyoung(entry);
2116	entry = pte_mkhuge(entry);
2117
2118	return entry;
2119}
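
/*
 * Minimal usage sketch (not a real call site; the fault paths below
 * derive `writable' from the VMA flags in context):
 *
 *	pte_t pte = make_huge_pte(vma, page, vma->vm_flags & VM_WRITE);
 *	set_huge_pte_at(mm, address, ptep, pte);
 *
 * A writable mapping yields a dirty, writable pte; a read-only one is
 * write-protected.  Both come back young and huge.
 */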
2120
2121static void set_huge_ptep_writable(struct vm_area_struct *vma,
2122				   unsigned long address, pte_t *ptep)
2123{
2124	pte_t entry;
2125
2126	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2127	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
2128		update_mmu_cache(vma, address, ptep);
2129	}
2130}
2131
2132
2133int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2134			    struct vm_area_struct *vma)
2135{
2136	pte_t *src_pte, *dst_pte, entry;
2137	struct page *ptepage;
2138	unsigned long addr;
2139	int cow;
2140	struct hstate *h = hstate_vma(vma);
2141	unsigned long sz = huge_page_size(h);
2142
2143	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2144
2145	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2146		src_pte = huge_pte_offset(src, addr);
2147		if (!src_pte)
2148			continue;
2149		dst_pte = huge_pte_alloc(dst, addr, sz);
2150		if (!dst_pte)
2151			goto nomem;
2152
2153		/* If the pagetables are shared don't copy or take references */
2154		if (dst_pte == src_pte)
2155			continue;
2156
2157		spin_lock(&dst->page_table_lock);
2158		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2159		if (!huge_pte_none(huge_ptep_get(src_pte))) {
2160			if (cow)
2161				huge_ptep_set_wrprotect(src, addr, src_pte);
2162			entry = huge_ptep_get(src_pte);
2163			ptepage = pte_page(entry);
2164			get_page(ptepage);
2165			page_dup_rmap(ptepage);
2166			set_huge_pte_at(dst, addr, dst_pte, entry);
2167		}
2168		spin_unlock(&src->page_table_lock);
2169		spin_unlock(&dst->page_table_lock);
2170	}
2171	return 0;
2172
2173nomem:
2174	return -ENOMEM;
2175}
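
/*
 * Note on the `cow' test above: (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
 * VM_MAYWRITE holds exactly for private mappings that may become
 * writable, e.g. a MAP_PRIVATE hugetlb mapping at fork() time; only then
 * is the parent's pte write-protected so a later write forces COW.
 */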
2176
2177static int is_hugetlb_entry_migration(pte_t pte)
2178{
2179	swp_entry_t swp;
2180
2181	if (huge_pte_none(pte) || pte_present(pte))
2182		return 0;
2183	swp = pte_to_swp_entry(pte);
2184	return non_swap_entry(swp) && is_migration_entry(swp);
2188}
2189
2190static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2191{
2192	swp_entry_t swp;
2193
2194	if (huge_pte_none(pte) || pte_present(pte))
2195		return 0;
2196	swp = pte_to_swp_entry(pte);
2197	return non_swap_entry(swp) && is_hwpoison_entry(swp);
2201}
2202
2203void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2204			    unsigned long end, struct page *ref_page)
2205{
2206	struct mm_struct *mm = vma->vm_mm;
2207	unsigned long address;
2208	pte_t *ptep;
2209	pte_t pte;
2210	struct page *page;
2211	struct page *tmp;
2212	struct hstate *h = hstate_vma(vma);
2213	unsigned long sz = huge_page_size(h);
2214
2215	/*
2216	 * A page gathering list, protected by the per-file i_mmap_mutex. The
2217	 * lock is used to avoid list corruption from multiple unmapping
2218	 * of the same page since we are using page->lru.
2219	 */
2220	LIST_HEAD(page_list);
2221
2222	WARN_ON(!is_vm_hugetlb_page(vma));
2223	BUG_ON(start & ~huge_page_mask(h));
2224	BUG_ON(end & ~huge_page_mask(h));
2225
2226	mmu_notifier_invalidate_range_start(mm, start, end);
2227	spin_lock(&mm->page_table_lock);
2228	for (address = start; address < end; address += sz) {
2229		ptep = huge_pte_offset(mm, address);
2230		if (!ptep)
2231			continue;
2232
2233		if (huge_pmd_unshare(mm, &address, ptep))
2234			continue;
2235
2236		/*
2237		 * If a reference page is supplied, it is because a specific
2238		 * page is being unmapped, not a range. Ensure the page we
2239		 * are about to unmap is the actual page of interest.
2240		 */
2241		if (ref_page) {
2242			pte = huge_ptep_get(ptep);
2243			if (huge_pte_none(pte))
2244				continue;
2245			page = pte_page(pte);
2246			if (page != ref_page)
2247				continue;
2248
2249			/*
2250			 * Mark the VMA as having unmapped its page so that
2251			 * future faults in this VMA will fail rather than
2252			 * looking like data was lost
2253			 */
2254			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2255		}
2256
2257		pte = huge_ptep_get_and_clear(mm, address, ptep);
2258		if (huge_pte_none(pte))
2259			continue;
2260
2261		/*
2262		 * HWPoisoned hugepage is already unmapped and dropped reference
2263		 */
2264		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2265			continue;
2266
2267		page = pte_page(pte);
2268		if (pte_dirty(pte))
2269			set_page_dirty(page);
2270		list_add(&page->lru, &page_list);
2271	}
2272	spin_unlock(&mm->page_table_lock);
2273	flush_tlb_range(vma, start, end);
2274	mmu_notifier_invalidate_range_end(mm, start, end);
2275	list_for_each_entry_safe(page, tmp, &page_list, lru) {
2276		page_remove_rmap(page);
2277		list_del(&page->lru);
2278		put_page(page);
2279	}
2280}
2281
2282void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2283			  unsigned long end, struct page *ref_page)
2284{
2285	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2286	__unmap_hugepage_range(vma, start, end, ref_page);
2287	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2288}
2289
2290/*
2291 * This is called when the original mapper fails to COW a MAP_PRIVATE
2292 * mapping it owns the reserve page for. The intention is to unmap the page
2293 * from other VMAs and let the children be SIGKILLed if they are faulting the
2294 * same region.
2295 */
2296static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2297				struct page *page, unsigned long address)
2298{
2299	struct hstate *h = hstate_vma(vma);
2300	struct vm_area_struct *iter_vma;
2301	struct address_space *mapping;
2302	struct prio_tree_iter iter;
2303	pgoff_t pgoff;
2304
2305	/*
2306	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2307	 * from the page cache lookup, which is in HPAGE_SIZE units.
2308	 */
2309	address = address & huge_page_mask(h);
2310	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2311		+ (vma->vm_pgoff >> PAGE_SHIFT);
2312	mapping = (struct address_space *)page_private(page);
2313
2314	/*
2315	 * Take the mapping lock for the duration of the table walk. As
2316	 * this mapping should be shared between all the VMAs,
2317	 * __unmap_hugepage_range() is called directly, as the lock is already held.
2318	 */
2319	mutex_lock(&mapping->i_mmap_mutex);
2320	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2321		/* Do not unmap the current VMA */
2322		if (iter_vma == vma)
2323			continue;
2324
2325		/*
2326		 * Unmap the page from other VMAs without their own reserves.
2327		 * They get marked to be SIGKILLed if they fault in these
2328		 * areas. This is because a future no-page fault on this VMA
2329		 * could insert a zeroed page instead of the data existing
2330		 * from the time of fork. This would look like data corruption
2331		 */
2332		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2333			__unmap_hugepage_range(iter_vma,
2334				address, address + huge_page_size(h),
2335				page);
2336	}
2337	mutex_unlock(&mapping->i_mmap_mutex);
2338
2339	return 1;
2340}
2341
2342/*
2343 * hugetlb_cow() should be called with the page lock of the original hugepage held.
2344 */
2345static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2346			unsigned long address, pte_t *ptep, pte_t pte,
2347			struct page *pagecache_page)
2348{
2349	struct hstate *h = hstate_vma(vma);
2350	struct page *old_page, *new_page;
2351	int avoidcopy;
2352	int outside_reserve = 0;
2353
2354	old_page = pte_page(pte);
2355
2356retry_avoidcopy:
2357	/* If no-one else is actually using this page, avoid the copy
2358	 * and just make the page writable */
2359	avoidcopy = (page_mapcount(old_page) == 1);
2360	if (avoidcopy) {
2361		if (PageAnon(old_page))
2362			page_move_anon_rmap(old_page, vma, address);
2363		set_huge_ptep_writable(vma, address, ptep);
2364		return 0;
2365	}
2366
2367	/*
2368	 * If the process that created a MAP_PRIVATE mapping is about to
2369	 * perform a COW due to a shared page count, attempt to satisfy
2370	 * the allocation without using the existing reserves. The pagecache
2371	 * page is used to determine if the reserve at this address was
2372	 * consumed or not. If reserves were used, a partial faulted mapping
2373	 * at the time of fork() could consume its reserves on COW instead
2374	 * of the full address range.
2375	 */
2376	if (!(vma->vm_flags & VM_MAYSHARE) &&
2377			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2378			old_page != pagecache_page)
2379		outside_reserve = 1;
2380
2381	page_cache_get(old_page);
2382
2383	/* Drop page_table_lock as buddy allocator may be called */
2384	spin_unlock(&mm->page_table_lock);
2385	new_page = alloc_huge_page(vma, address, outside_reserve);
2386
2387	if (IS_ERR(new_page)) {
2388		page_cache_release(old_page);
2389
2390		/*
2391		 * If a process owning a MAP_PRIVATE mapping fails to COW,
2392		 * it is due to references held by a child and an insufficient
2393		 * huge page pool. To guarantee the original mapper's
2394		 * reliability, unmap the page from child processes. The child
2395		 * may get SIGKILLed if it later faults.
2396		 */
2397		if (outside_reserve) {
2398			BUG_ON(huge_pte_none(pte));
2399			if (unmap_ref_private(mm, vma, old_page, address)) {
2400				BUG_ON(page_count(old_page) != 1);
2401				BUG_ON(huge_pte_none(pte));
2402				spin_lock(&mm->page_table_lock);
2403				goto retry_avoidcopy;
2404			}
2405			WARN_ON_ONCE(1);
2406		}
2407
2408		/* Caller expects lock to be held */
2409		spin_lock(&mm->page_table_lock);
2410		return -PTR_ERR(new_page);
2411	}
2412
2413	/*
2414	 * When the original hugepage is a shared one, it does not have
2415	 * anon_vma prepared.
2416	 */
2417	if (unlikely(anon_vma_prepare(vma))) {
2418		/* Caller expects lock to be held */
2419		spin_lock(&mm->page_table_lock);
2420		return VM_FAULT_OOM;
2421	}
2422
2423	copy_user_huge_page(new_page, old_page, address, vma,
2424			    pages_per_huge_page(h));
2425	__SetPageUptodate(new_page);
2426
2427	/*
2428	 * Retake the page_table_lock to check for racing updates
2429	 * before the page tables are altered
2430	 */
2431	spin_lock(&mm->page_table_lock);
2432	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2433	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2434		/* Break COW */
2435		mmu_notifier_invalidate_range_start(mm,
2436			address & huge_page_mask(h),
2437			(address & huge_page_mask(h)) + huge_page_size(h));
2438		huge_ptep_clear_flush(vma, address, ptep);
2439		set_huge_pte_at(mm, address, ptep,
2440				make_huge_pte(vma, new_page, 1));
2441		page_remove_rmap(old_page);
2442		hugepage_add_new_anon_rmap(new_page, vma, address);
2443		/* Make the old page be freed below */
2444		new_page = old_page;
2445		mmu_notifier_invalidate_range_end(mm,
2446			address & huge_page_mask(h),
2447			(address & huge_page_mask(h)) + huge_page_size(h));
2448	}
2449	page_cache_release(new_page);
2450	page_cache_release(old_page);
2451	return 0;
2452}
2453
2454/* Return the pagecache page at a given address within a VMA */
2455static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2456			struct vm_area_struct *vma, unsigned long address)
2457{
2458	struct address_space *mapping;
2459	pgoff_t idx;
2460
2461	mapping = vma->vm_file->f_mapping;
2462	idx = vma_hugecache_offset(h, vma, address);
2463
2464	return find_lock_page(mapping, idx);
2465}
2466
2467/*
2468 * Return whether there is a pagecache page backing the given address within
2469 * the VMA. Caller follow_hugetlb_page() holds page_table_lock, so we cannot lock_page.
2470 */
2471static bool hugetlbfs_pagecache_present(struct hstate *h,
2472			struct vm_area_struct *vma, unsigned long address)
2473{
2474	struct address_space *mapping;
2475	pgoff_t idx;
2476	struct page *page;
2477
2478	mapping = vma->vm_file->f_mapping;
2479	idx = vma_hugecache_offset(h, vma, address);
2480
2481	page = find_get_page(mapping, idx);
2482	if (page)
2483		put_page(page);
2484	return page != NULL;
2485}
2486
2487static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2488			unsigned long address, pte_t *ptep, unsigned int flags)
2489{
2490	struct hstate *h = hstate_vma(vma);
2491	int ret = VM_FAULT_SIGBUS;
2492	pgoff_t idx;
2493	unsigned long size;
2494	struct page *page;
2495	struct address_space *mapping;
2496	pte_t new_pte;
2497
2498	/*
2499	 * Currently, we are forced to kill the process in the event the
2500	 * original mapper has unmapped pages from the child due to a failed
2501	 * COW. Warn that such a situation has occurred as it may not be obvious
2502	 */
2503	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2504		printk(KERN_WARNING
2505			"PID %d killed due to inadequate hugepage pool\n",
2506			current->pid);
2507		return ret;
2508	}
2509
2510	mapping = vma->vm_file->f_mapping;
2511	idx = vma_hugecache_offset(h, vma, address);
2512
2513	/*
2514	 * Use page lock to guard against racing truncation
2515	 * before we get page_table_lock.
2516	 */
2517retry:
2518	page = find_lock_page(mapping, idx);
2519	if (!page) {
2520		size = i_size_read(mapping->host) >> huge_page_shift(h);
2521		if (idx >= size)
2522			goto out;
2523		page = alloc_huge_page(vma, address, 0);
2524		if (IS_ERR(page)) {
2525			ret = -PTR_ERR(page);
2526			goto out;
2527		}
2528		clear_huge_page(page, address, pages_per_huge_page(h));
2529		__SetPageUptodate(page);
2530
2531		if (vma->vm_flags & VM_MAYSHARE) {
2532			int err;
2533			struct inode *inode = mapping->host;
2534
2535			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2536			if (err) {
2537				put_page(page);
2538				if (err == -EEXIST)
2539					goto retry;
2540				goto out;
2541			}
2542
2543			spin_lock(&inode->i_lock);
2544			inode->i_blocks += blocks_per_huge_page(h);
2545			spin_unlock(&inode->i_lock);
2546			page_dup_rmap(page);
2547		} else {
2548			lock_page(page);
2549			if (unlikely(anon_vma_prepare(vma))) {
2550				ret = VM_FAULT_OOM;
2551				goto backout_unlocked;
2552			}
2553			hugepage_add_new_anon_rmap(page, vma, address);
2554		}
2555	} else {
2556		/*
2557		 * If a memory error occurs between mmap() and fault, some
2558		 * processes don't have a hwpoisoned swap entry for the errored
2559		 * virtual address, so block the hugepage fault with a PG_hwpoison bit check.
2560		 */
2561		if (unlikely(PageHWPoison(page))) {
2562			ret = VM_FAULT_HWPOISON |
2563			      VM_FAULT_SET_HINDEX(h - hstates);
2564			goto backout_unlocked;
2565		}
2566		page_dup_rmap(page);
2567	}
2568
2569	/*
2570	 * If we are going to COW a private mapping later, we examine the
2571	 * pending reservations for this page now. This will ensure that
2572	 * any allocations necessary to record that reservation occur outside
2573	 * the spinlock.
2574	 */
2575	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2576		if (vma_needs_reservation(h, vma, address) < 0) {
2577			ret = VM_FAULT_OOM;
2578			goto backout_unlocked;
2579		}
2580
2581	spin_lock(&mm->page_table_lock);
2582	size = i_size_read(mapping->host) >> huge_page_shift(h);
2583	if (idx >= size)
2584		goto backout;
2585
2586	ret = 0;
2587	if (!huge_pte_none(huge_ptep_get(ptep)))
2588		goto backout;
2589
2590	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2591				&& (vma->vm_flags & VM_SHARED)));
2592	set_huge_pte_at(mm, address, ptep, new_pte);
2593
2594	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2595		/* Optimization, do the COW without a second fault */
2596		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2597	}
2598
2599	spin_unlock(&mm->page_table_lock);
2600	unlock_page(page);
2601out:
2602	return ret;
2603
2604backout:
2605	spin_unlock(&mm->page_table_lock);
2606backout_unlocked:
2607	unlock_page(page);
2608	put_page(page);
2609	goto out;
2610}
2611
2612int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2613			unsigned long address, unsigned int flags)
2614{
2615	pte_t *ptep;
2616	pte_t entry;
2617	int ret;
2618	struct page *page = NULL;
2619	struct page *pagecache_page = NULL;
2620	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2621	struct hstate *h = hstate_vma(vma);
2622
2623	ptep = huge_pte_offset(mm, address);
2624	if (ptep) {
2625		entry = huge_ptep_get(ptep);
2626		if (unlikely(is_hugetlb_entry_migration(entry))) {
2627			migration_entry_wait(mm, (pmd_t *)ptep, address);
2628			return 0;
2629		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2630			return VM_FAULT_HWPOISON_LARGE |
2631			       VM_FAULT_SET_HINDEX(h - hstates);
2632	}
2633
2634	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2635	if (!ptep)
2636		return VM_FAULT_OOM;
2637
2638	/*
2639	 * Serialize hugepage allocation and instantiation, so that we don't
2640	 * get spurious allocation failures if two CPUs race to instantiate
2641	 * the same page in the page cache.
2642	 */
2643	mutex_lock(&hugetlb_instantiation_mutex);
2644	entry = huge_ptep_get(ptep);
2645	if (huge_pte_none(entry)) {
2646		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2647		goto out_mutex;
2648	}
2649
2650	ret = 0;
2651
2652	/*
2653	 * If we are going to COW the mapping later, we examine the pending
2654	 * reservations for this page now. This will ensure that any
2655	 * allocations necessary to record that reservation occur outside the
2656	 * spinlock. For private mappings, we also lookup the pagecache
2657	 * page now as it is used to determine if a reservation has been
2658	 * consumed.
2659	 */
2660	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2661		if (vma_needs_reservation(h, vma, address) < 0) {
2662			ret = VM_FAULT_OOM;
2663			goto out_mutex;
2664		}
2665
2666		if (!(vma->vm_flags & VM_MAYSHARE))
2667			pagecache_page = hugetlbfs_pagecache_page(h,
2668								vma, address);
2669	}
2670
2671	/*
2672	 * hugetlb_cow() requires page locks of pte_page(entry) and
2673	 * pagecache_page, so here we need to take the former one
2674	 * when page != pagecache_page or !pagecache_page.
2675	 * Note that the locking order is always pagecache_page -> page,
2676	 * so there is no deadlock concern.
2677	 */
2678	page = pte_page(entry);
2679	if (page != pagecache_page)
2680		lock_page(page);
2681
2682	spin_lock(&mm->page_table_lock);
2683	/* Check for a racing update before calling hugetlb_cow */
2684	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2685		goto out_page_table_lock;
2686
2687
2688	if (flags & FAULT_FLAG_WRITE) {
2689		if (!pte_write(entry)) {
2690			ret = hugetlb_cow(mm, vma, address, ptep, entry,
2691							pagecache_page);
2692			goto out_page_table_lock;
2693		}
2694		entry = pte_mkdirty(entry);
2695	}
2696	entry = pte_mkyoung(entry);
2697	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2698						flags & FAULT_FLAG_WRITE))
2699		update_mmu_cache(vma, address, ptep);
2700
2701out_page_table_lock:
2702	spin_unlock(&mm->page_table_lock);
2703
2704	if (pagecache_page) {
2705		unlock_page(pagecache_page);
2706		put_page(pagecache_page);
2707	}
2708	if (page != pagecache_page)
2709		unlock_page(page);
2710
2711out_mutex:
2712	mutex_unlock(&hugetlb_instantiation_mutex);
2713
2714	return ret;
2715}
2716
2717/* Can be overridden by architectures */
2718__attribute__((weak)) struct page *
2719follow_huge_pud(struct mm_struct *mm, unsigned long address,
2720	       pud_t *pud, int write)
2721{
2722	BUG();
2723	return NULL;
2724}
2725
2726int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2727			struct page **pages, struct vm_area_struct **vmas,
2728			unsigned long *position, int *length, int i,
2729			unsigned int flags)
2730{
2731	unsigned long pfn_offset;
2732	unsigned long vaddr = *position;
2733	int remainder = *length;
2734	struct hstate *h = hstate_vma(vma);
2735
2736	spin_lock(&mm->page_table_lock);
2737	while (vaddr < vma->vm_end && remainder) {
2738		pte_t *pte;
2739		int absent;
2740		struct page *page;
2741
2742		/*
2743		 * Some archs (sparc64, sh*) have multiple pte_t entries per
2744		 * hugepage.  We have to make sure we get the
2745		 * first, for the page indexing below to work.
2746		 */
2747		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2748		absent = !pte || huge_pte_none(huge_ptep_get(pte));
2749
2750		/*
2751		 * When coredumping, it suits get_dump_page if we just return
2752		 * an error where there's an empty slot with no huge pagecache
2753		 * to back it.  This way, we avoid allocating a hugepage, and
2754		 * the sparse dumpfile avoids allocating disk blocks, but its
2755		 * huge holes still show up with zeroes where they need to be.
2756		 */
2757		if (absent && (flags & FOLL_DUMP) &&
2758		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2759			remainder = 0;
2760			break;
2761		}
2762
2763		if (absent ||
2764		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2765			int ret;
2766
2767			spin_unlock(&mm->page_table_lock);
2768			ret = hugetlb_fault(mm, vma, vaddr,
2769				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2770			spin_lock(&mm->page_table_lock);
2771			if (!(ret & VM_FAULT_ERROR))
2772				continue;
2773
2774			remainder = 0;
2775			break;
2776		}
2777
2778		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2779		page = pte_page(huge_ptep_get(pte));
2780same_page:
2781		if (pages) {
2782			pages[i] = mem_map_offset(page, pfn_offset);
2783			get_page(pages[i]);
2784		}
2785
2786		if (vmas)
2787			vmas[i] = vma;
2788
2789		vaddr += PAGE_SIZE;
2790		++pfn_offset;
2791		--remainder;
2792		++i;
2793		if (vaddr < vma->vm_end && remainder &&
2794				pfn_offset < pages_per_huge_page(h)) {
2795			/*
2796			 * We use pfn_offset to avoid touching the pageframes
2797			 * of this compound page.
2798			 */
2799			goto same_page;
2800		}
2801	}
2802	spin_unlock(&mm->page_table_lock);
2803	*length = remainder;
2804	*position = vaddr;
2805
2806	return i ? i : -EFAULT;
2807}
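
/*
 * Illustrative walk (hypothetical sizes): with a 2MB hstate and 4KB base
 * pages, one present huge pte satisfies up to 512 get_user_pages() slots
 * through the same_page loop; pfn_offset indexes the subpages so the
 * tail pageframes themselves are never touched.
 */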
2808
2809void hugetlb_change_protection(struct vm_area_struct *vma,
2810		unsigned long address, unsigned long end, pgprot_t newprot)
2811{
2812	struct mm_struct *mm = vma->vm_mm;
2813	unsigned long start = address;
2814	pte_t *ptep;
2815	pte_t pte;
2816	struct hstate *h = hstate_vma(vma);
2817
2818	BUG_ON(address >= end);
2819	flush_cache_range(vma, address, end);
2820
2821	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2822	spin_lock(&mm->page_table_lock);
2823	for (; address < end; address += huge_page_size(h)) {
2824		ptep = huge_pte_offset(mm, address);
2825		if (!ptep)
2826			continue;
2827		if (huge_pmd_unshare(mm, &address, ptep))
2828			continue;
2829		if (!huge_pte_none(huge_ptep_get(ptep))) {
2830			pte = huge_ptep_get_and_clear(mm, address, ptep);
2831			pte = pte_mkhuge(pte_modify(pte, newprot));
2832			set_huge_pte_at(mm, address, ptep, pte);
2833		}
2834	}
2835	spin_unlock(&mm->page_table_lock);
2836	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2837
2838	flush_tlb_range(vma, start, end);
2839}
2840
2841int hugetlb_reserve_pages(struct inode *inode,
2842					long from, long to,
2843					struct vm_area_struct *vma,
2844					vm_flags_t vm_flags)
2845{
2846	long ret, chg;
2847	struct hstate *h = hstate_inode(inode);
2848
2849	/*
2850	 * Only apply hugepage reservation if asked. At fault time, an
2851	 * attempt will be made for VM_NORESERVE mappings to allocate a page
2852	 * and filesystem quota without using reserves.
2853	 */
2854	if (vm_flags & VM_NORESERVE)
2855		return 0;
2856
2857	/*
2858	 * Shared mappings base their reservation on the number of pages that
2859	 * are already allocated on behalf of the file. Private mappings need
2860	 * to reserve the full area even if read-only as mprotect() may be
2861	 * called to make the mapping read-write. Assume !vma is a shm mapping
2862	 */
2863	if (!vma || vma->vm_flags & VM_MAYSHARE)
2864		chg = region_chg(&inode->i_mapping->private_list, from, to);
2865	else {
2866		struct resv_map *resv_map = resv_map_alloc();
2867		if (!resv_map)
2868			return -ENOMEM;
2869
2870		chg = to - from;
2871
2872		set_vma_resv_map(vma, resv_map);
2873		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2874	}
2875
2876	if (chg < 0)
2877		return chg;
2878
2879	/* There must be enough filesystem quota for the mapping */
2880	if (hugetlb_get_quota(inode->i_mapping, chg))
2881		return -ENOSPC;
2882
2883	/*
2884	 * Check that enough hugepages are available for the reservation.
2885	 * Hand back the quota if there are not.
2886	 */
2887	ret = hugetlb_acct_memory(h, chg);
2888	if (ret < 0) {
2889		hugetlb_put_quota(inode->i_mapping, chg);
2890		return ret;
2891	}
2892
2893	/*
2894	 * Account for the reservations made. Shared mappings record regions
2895	 * that have reservations as they are shared by multiple VMAs.
2896	 * When the last VMA disappears, the region map says how much
2897	 * the reservation was and the page cache tells how much of
2898	 * the reservation was consumed. Private mappings are per-VMA and
2899	 * only the consumed reservations are tracked. When the VMA
2900	 * disappears, the original reservation is the VMA size and the
2901	 * consumed reservations are stored in the map. Hence, nothing
2902	 * else has to be done for private mappings here
2903	 */
2904	if (!vma || vma->vm_flags & VM_MAYSHARE)
2905		region_add(&inode->i_mapping->private_list, from, to);
2906	return 0;
2907}
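
/*
 * Worked example (hypothetical): mmap()ing bytes [0, 4MB) of a hugetlbfs
 * file backed by a 2MB hstate arrives here as from == 0, to == 2 (huge
 * page units).  A shared mapping charges only the pages not already in
 * the region list; a private one charges the full chg = to - from = 2.
 */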
2908
2909void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2910{
2911	struct hstate *h = hstate_inode(inode);
2912	long chg = region_truncate(&inode->i_mapping->private_list, offset);
2913
2914	spin_lock(&inode->i_lock);
2915	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2916	spin_unlock(&inode->i_lock);
2917
2918	hugetlb_put_quota(inode->i_mapping, (chg - freed));
2919	hugetlb_acct_memory(h, -(chg - freed));
2920}
2921
2922#ifdef CONFIG_MEMORY_FAILURE
2923
2924/* Should be called in hugetlb_lock */
2925static int is_hugepage_on_freelist(struct page *hpage)
2926{
2927	struct page *page;
2928	struct page *tmp;
2929	struct hstate *h = page_hstate(hpage);
2930	int nid = page_to_nid(hpage);
2931
2932	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
2933		if (page == hpage)
2934			return 1;
2935	return 0;
2936}
2937
2938/*
2939 * This function is called from memory failure code.
2940 * Assume the caller holds page lock of the head page.
2941 */
2942int dequeue_hwpoisoned_huge_page(struct page *hpage)
2943{
2944	struct hstate *h = page_hstate(hpage);
2945	int nid = page_to_nid(hpage);
2946	int ret = -EBUSY;
2947
2948	spin_lock(&hugetlb_lock);
2949	if (is_hugepage_on_freelist(hpage)) {
2950		list_del(&hpage->lru);
2951		set_page_refcounted(hpage);
2952		h->free_huge_pages--;
2953		h->free_huge_pages_node[nid]--;
2954		ret = 0;
2955	}
2956	spin_unlock(&hugetlb_lock);
2957	return ret;
2958}
2959#endif
2960