/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;
	/* Virtual page frame number of the faulting address. */
	unsigned long vpfn = address >> PAGE_SHIFT;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Select the 4K subpage within the huge page. */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}
#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

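/*
 * On x86, a huge mapping is marked by the PSE (Page Size Extension) bit
 * in the directory entry, so deciding whether a pmd/pud maps a huge
 * page reduces to testing _PAGE_PSE.
 */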
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
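/*
 * A worked example of the align_mask computation above (values assume
 * 2 MB huge pages and 4 KB base pages): huge_page_mask(h) is ~0x1fffff
 * and PAGE_MASK is ~0xfff, so align_mask ends up as 0x1ff000.
 * vm_unmapped_area() then returns an address with those bits clear,
 * i.e. one aligned to the huge page size.
 */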

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
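
/*
 * Illustrative user-space path into the hook above (a sketch only; the
 * mount point and file name are made up): mapping a file that lives on
 * a mounted hugetlbfs arrives here to pick a huge-page-aligned address.
 *
 *	int fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0755);
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */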
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
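/*
 * Parse the "hugepagesz=" kernel command line option. Typical usage,
 * per Documentation/kernel-parameters.txt, is something like
 * "hugepagesz=1G hugepages=2" to reserve two 1 GB pages at boot,
 * provided the CPU supports gbpages.
 */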
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif