/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/sysdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu/sq.h>

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static kmem_cache_t *sq_cache;
static unsigned long *sq_bitmap;

/*
 * Dummy SQ read followed by re-zeroing both queues; used by the
 * callers below to wait for outstanding store queue activity to
 * complete.
 */
#define store_queue_barrier()			\
do {						\
	(void)ctrl_inl(P4SEG_STORE_QUE);	\
	ctrl_outl(0, P4SEG_STORE_QUE + 0);	\
	ctrl_outl(0, P4SEG_STORE_QUE + 8);	\
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	volatile unsigned long *sq = (unsigned long *)start;

	/* Flush the queues, 32 bytes (one SQ line) at a time */
	for (len >>= 5; len--; sq += 8)
		prefetchw((void *)sq);

	/* Wait for completion */
	store_queue_barrier();
}

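/*
 * Example (an illustrative sketch; 'sq' is assumed to be an address
 * previously returned by sq_remap()):
 *
 *	memcpy((void *)sq, data, 64);
 *	sq_flush_range(sq, 64);
 *
 * The stores fill the queues through the SQ mapping, and the flush
 * prefetches both 32-byte lines out and then waits for completion.
 */
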
static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, unsigned long flags)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
			     map->size, flags)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
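	/*
	 * The masking maps physical address bits [28:26] (the external
	 * memory area) into the QACR AREA field at bits [4:2]; e.g. for
	 * phys 0x0c000000 (area 3), ((0x0c000000 >> 26) << 2) & 0x1c
	 * works out to 0x0c.
	 */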
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @flags: Protection flags.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, unsigned long flags)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;
	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	/* The bitmap carries one bit per page of SQ space */
	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, flags);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}

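/*
 * Example use of sq_remap() (an illustrative sketch; the physical
 * address and size are hypothetical):
 *
 *	unsigned long sq;
 *
 *	sq = sq_remap(0x18000000, 0x1000, "example",
 *		      pgprot_val(PAGE_SHARED));
 *	if (IS_ERR_VALUE(sq))
 *		return sq;
 *
 * Stores through the returned address now go via the store queues;
 * pair each burst with sq_flush_range().
 */
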
/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Store queue address returned by sq_remap().
 *
 * Unmaps the store queue allocation at @vaddr that was previously created
 * by sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __FUNCTION__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/* Tear down the VMA in the MMU case */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __FUNCTION__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}

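/*
 * Continuing the sq_remap() sketch above, the matching teardown is
 * simply:
 *
 *	sq_unmap(sq);
 *
 * where 'sq' is the exact address that sq_remap() returned.
 */
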
/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
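
/*
 * For example, from userspace (paths assume the standard sysdev
 * layout; addresses are hypothetical):
 *
 *	# echo "18000000 1000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *	# cat /sys/devices/system/cpu/cpu0/sq/mapping
 *	e0000000-e0001000 [18000000]: Userspace
 *
 * Writing "<base> <len>" creates a mapping; writing the store queue
 * address back with a zero length ("e0000000 0" here) tears it down.
 */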
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(attr)	container_of(attr, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace",
				   pgprot_val(PAGE_SHARED));
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};

static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	kobj->parent = &sysdev->kobj;
	kobject_set_name(kobj, "%s", "sq");
	kobj->ktype = &ktype_percpu_entry;

	return kobject_register(kobj);
}

static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_unregister(kobj);
	return 0;
}

static struct sysdev_driver sq_sysdev_driver = {
	.add		= sq_sysdev_add,
	.remove		= __devexit_p(sq_sysdev_remove),
};

static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = BITS_TO_LONGS(nr_pages) * sizeof(unsigned long);
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0,
				NULL, NULL);
	if (unlikely(!sq_cache))
		return ret;

	/* One bit per page of store queue space, sized in bytes */
	sq_bitmap = kzalloc(size, GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(sq_remap);
EXPORT_SYMBOL(sq_unmap);
EXPORT_SYMBOL(sq_flush_range);