1/*
2 * Provide common bits of early_ioremap() support for architectures needing
3 * temporary mappings during boot before ioremap() is available.
4 *
5 * This is mostly a direct copy of the x86 early_ioremap implementation.
6 *
7 * (C) Copyright 1995 1996, 2014 Linus Torvalds
8 *
9 */
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/mm.h>
16#include <linux/vmalloc.h>
17#include <asm/fixmap.h>
18
19#ifdef CONFIG_MMU
/* Non-zero when "early_ioremap_debug" was given on the kernel command line. */
static int early_ioremap_debug __initdata;

/*
 * Parse the "early_ioremap_debug" boot parameter: enables verbose WARN
 * tracing of every early (un)mapping.  Returns 0 so the parameter is
 * accepted as well-formed.
 */
static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
29
30static int after_paging_init __initdata;
31
/*
 * Arch hook invoked from early_ioremap_reset() for tearing down any
 * architecture-specific early-mapping state; the default weak version
 * does nothing.
 */
void __init __weak early_ioremap_shutdown(void)
{
}
35
/*
 * Called once paging_init() has run: give the architecture a chance to
 * clean up, then switch all subsequent early mappings over to the
 * __late_set_fixmap/__late_clear_fixmap helpers.
 */
void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}
41
42/*
43 * Generally, ioremap() is available after paging_init() has been called.
44 * Architectures wanting to allow early_ioremap after paging_init() can
45 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
46 */
#ifndef __late_set_fixmap
/*
 * Fallback for architectures that do not support early_ioremap() after
 * paging_init(): reaching this path post-paging is a hard bug.
 */
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif
54
#ifndef __late_clear_fixmap
/*
 * Fallback for architectures that do not support early_iounmap() after
 * paging_init(): reaching this path post-paging is a hard bug.
 */
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
61
/*
 * Per-slot bookkeeping for the fixed pool of boot-time mapping slots:
 * prev_map  - address handed out for each live slot (NULL == slot free)
 * prev_size - byte size requested at map time, cross-checked on unmap
 * slot_virt - base virtual address of each slot's fixmap range
 */
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
65
66void __init early_ioremap_setup(void)
67{
68	int i;
69
70	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
71		if (WARN_ON(prev_map[i]))
72			break;
73
74	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
75		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
76}
77
78static int __init check_early_ioremap_leak(void)
79{
80	int count = 0;
81	int i;
82
83	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
84		if (prev_map[i])
85			count++;
86
87	if (WARN(count, KERN_WARNING
88		 "Debug warning: early ioremap leak of %d areas detected.\n"
89		 "please boot with early_ioremap_debug and report the dmesg.\n",
90		 count))
91		return 1;
92	return 0;
93}
94late_initcall(check_early_ioremap_leak);
95
/*
 * Map @size bytes starting at physical address @phys_addr with protection
 * @prot, using one free boot-time fixmap slot.  Returns the virtual
 * address of the mapping (including the sub-page offset of @phys_addr),
 * or NULL if no slot is free, the request is empty/wraps around, or it
 * needs more pages than one slot provides.  Undo with early_iounmap(),
 * passing the exact same size.
 */
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	/* Grab the first free slot (prev_map[] entry still NULL). */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
		 __func__, (u64)phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	/* Remember the requested size so early_iounmap() can cross-check it. */
	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	/* Fixmap indices count downward, so walk from the slot's top index. */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	/* NOTE: phys_addr was advanced past the mapping by the loop above,
	 * so the debug print shows the end, not the start, of the range. */
	WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
158
159void __init early_iounmap(void __iomem *addr, unsigned long size)
160{
161	unsigned long virt_addr;
162	unsigned long offset;
163	unsigned int nrpages;
164	enum fixed_addresses idx;
165	int i, slot;
166
167	slot = -1;
168	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
169		if (prev_map[i] == addr) {
170			slot = i;
171			break;
172		}
173	}
174
175	if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
176		 addr, size))
177		return;
178
179	if (WARN(prev_size[slot] != size,
180		 "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
181		 addr, size, slot, prev_size[slot]))
182		return;
183
184	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
185	     addr, size, slot);
186
187	virt_addr = (unsigned long)addr;
188	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
189		return;
190
191	offset = virt_addr & ~PAGE_MASK;
192	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
193
194	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
195	while (nrpages > 0) {
196		if (after_paging_init)
197			__late_clear_fixmap(idx);
198		else
199			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
200		--idx;
201		--nrpages;
202	}
203	prev_map[slot] = NULL;
204}
205
/* Remap an IO device: boot-time counterpart of ioremap(); pages get
 * device (FIXMAP_PAGE_IO) attributes. */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}
212
/* Remap memory: like early_ioremap() but with normal cacheable
 * (FIXMAP_PAGE_NORMAL) attributes; the __iomem marker is cast away
 * because the target is ordinary RAM. */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       FIXMAP_PAGE_NORMAL);
}
220#else /* CONFIG_MMU */
221
/* Without an MMU, physical and virtual addresses coincide, so "mapping"
 * is just a cast of the physical address. */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}
227
/* Remap memory */
/* !MMU variant: identity mapping, no fixmap involvement. */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}
234
/* !MMU variant: nothing was mapped, so there is nothing to undo. */
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}
238
239#endif /* CONFIG_MMU */
240
241
/* Counterpart of early_memremap(): forwards to early_iounmap(), restoring
 * the __iomem marker that early_memremap() stripped off. */
void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}
246