/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

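/*
 * Allocate 2^order pages: from the buddy allocator once the slab
 * allocator is available, from bootmem during early boot.
 */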
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

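/*
 * Allocate and clear a four-page region-third table for the identity
 * mapping (64-bit only; 31-bit kernels do not use this level).
 */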
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

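/*
 * Allocate and clear a four-page segment table (64-bit only).
 */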
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

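/*
 * Allocate a page table, either from the page table allocator of
 * init_mm or, before the slab allocator is up, from bootmem.
 * All entries start out invalid.
 */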
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
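		/*
		 * With EDAT2 a suitably aligned chunk of at least
		 * PUD_SIZE can be mapped by a single large region-third
		 * entry instead of lower level page tables.
		 */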
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
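		/*
		 * With EDAT1 a suitably aligned chunk of at least
		 * PMD_SIZE (1MB) can be mapped by a single large
		 * segment entry.
		 */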
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used.
			 * Otherwise we would also need page tables, since
			 * vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
#endif
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

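	/* Reject the new segment if it overlaps any existing one. */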
	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

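	/* Bail out with -ENOENT if no matching segment was found. */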
	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

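	/*
	 * Everything between _stext and _eshared is mapped read-only,
	 * the rest of memory read-write.
	 */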
	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);