block2mtd.c revision cc71229ff345a32d1b3de370a257dac62986b187
1/*
2 * $Id: block2mtd.c,v 1.28 2005/03/19 22:40:44 gleixner Exp $
3 *
4 * block2mtd.c - create an mtd from a block device
5 *
6 * Copyright (C) 2001,2002	Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004,2005	Jörn Engel <joern@wh.fh-wedel.de>
8 *
9 * Licence: GPL
10 */
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/fs.h>
14#include <linux/blkdev.h>
15#include <linux/bio.h>
16#include <linux/pagemap.h>
17#include <linux/list.h>
18#include <linux/init.h>
19#include <linux/mtd/mtd.h>
20#include <linux/buffer_head.h>
21
22#define VERSION "$Revision: 1.28 $"
23
24
/* Logging helpers: prefix every message with the driver name and append
 * a newline, so call sites pass a bare format string. */
#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
27
28
29/* Info for the block device */
/* Info for the block device */
struct block2mtd_dev {
	struct list_head list;		/* entry in blkmtd_device_list */
	struct block_device *blkdev;	/* underlying block device, opened exclusively */
	struct mtd_info mtd;		/* the emulated MTD registered with the MTD core */
	struct semaphore write_mutex;	/* serializes erase and write paths */
};
36
37
38/* Static info about the MTD, used in cleanup_module */
39static LIST_HEAD(blkmtd_device_list);
40
41
42#define PAGE_READAHEAD 64
43void cache_readahead(struct address_space *mapping, int index)
44{
45	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
46	int i, pagei;
47	unsigned ret = 0;
48	unsigned long end_index;
49	struct page *page;
50	LIST_HEAD(page_pool);
51	struct inode *inode = mapping->host;
52	loff_t isize = i_size_read(inode);
53
54	if (!isize) {
55		INFO("iSize=0 in cache_readahead\n");
56		return;
57	}
58
59	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
60
61	read_lock_irq(&mapping->tree_lock);
62	for (i = 0; i < PAGE_READAHEAD; i++) {
63		pagei = index + i;
64		if (pagei > end_index) {
65			INFO("Overrun end of disk in cache readahead\n");
66			break;
67		}
68		page = radix_tree_lookup(&mapping->page_tree, pagei);
69		if (page && (!i))
70			break;
71		if (page)
72			continue;
73		read_unlock_irq(&mapping->tree_lock);
74		page = page_cache_alloc_cold(mapping);
75		read_lock_irq(&mapping->tree_lock);
76		if (!page)
77			break;
78		page->index = pagei;
79		list_add(&page->lru, &page_pool);
80		ret++;
81	}
82	read_unlock_irq(&mapping->tree_lock);
83	if (ret)
84		read_cache_pages(mapping, &page_pool, filler, NULL);
85}
86
87
88static struct page* page_readahead(struct address_space *mapping, int index)
89{
90	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
91	cache_readahead(mapping, index);
92	return read_cache_page(mapping, index, filler, NULL);
93}
94
95
96/* erase a specified part of the device */
97static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
98{
99	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
100	struct page *page;
101	int index = to >> PAGE_SHIFT;	// page index
102	int pages = len >> PAGE_SHIFT;
103	u_long *p;
104	u_long *max;
105
106	while (pages) {
107		page = page_readahead(mapping, index);
108		if (!page)
109			return -ENOMEM;
110		if (IS_ERR(page))
111			return PTR_ERR(page);
112
113		max = (u_long*)page_address(page) + PAGE_SIZE;
114		for (p=(u_long*)page_address(page); p<max; p++)
115			if (*p != -1UL) {
116				lock_page(page);
117				memset(page_address(page), 0xff, PAGE_SIZE);
118				set_page_dirty(page);
119				unlock_page(page);
120				break;
121			}
122
123		page_cache_release(page);
124		pages--;
125		index++;
126	}
127	return 0;
128}
129static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
130{
131	struct block2mtd_dev *dev = mtd->priv;
132	size_t from = instr->addr;
133	size_t len = instr->len;
134	int err;
135
136	instr->state = MTD_ERASING;
137	down(&dev->write_mutex);
138	err = _block2mtd_erase(dev, from, len);
139	up(&dev->write_mutex);
140	if (err) {
141		ERROR("erase failed err = %d", err);
142		instr->state = MTD_ERASE_FAILED;
143	} else
144		instr->state = MTD_ERASE_DONE;
145
146	instr->state = MTD_ERASE_DONE;
147	mtd_erase_callback(instr);
148	return err;
149}
150
151
/*
 * Read @len bytes at offset @from into @buf, one page-cache page at a
 * time.  Reads extending past the device are truncated; *retlen (if
 * non-NULL) reports the bytes actually copied.  Returns 0 on success or
 * a negative errno from the page cache.
 */
static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	struct page *page;
	int index = from >> PAGE_SHIFT;		/* first page to read */
	int offset = from & (PAGE_SIZE-1);	/* byte offset within that page */
	int cpylen;

	/* NOTE(review): `>` allows a read starting exactly at mtd->size
	 * (it returns 0 bytes); `>=` may have been intended — confirm. */
	if (from > mtd->size)
		return -EINVAL;
	if (from + len > mtd->size)
		len = mtd->size - from;

	if (retlen)
		*retlen = 0;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		//      Get page
		page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(buf, page_address(page) + offset, cpylen);
		page_cache_release(page);

		if (retlen)
			*retlen += cpylen;
		buf += cpylen;
		offset = 0;	/* subsequent pages are read from their start */
		index++;
	}
	return 0;
}
194
195
196/* write data to the underlying device */
/* write data to the underlying device */
/*
 * Copy @len bytes from @buf into the block device's page cache starting
 * at offset @to, page by page.  Pages are only locked, modified and
 * marked dirty when their current contents differ from @buf, avoiding
 * needless writeback.  Caller must hold dev->write_mutex and must have
 * clamped @to/@len to the device size.  Returns 0 or a negative errno;
 * *retlen (if non-NULL) reports the bytes written.
 */
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
		loff_t to, size_t len, size_t *retlen)
{
	struct page *page;
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	int index = to >> PAGE_SHIFT;	// page index
	int offset = to & ~PAGE_MASK;	// page offset
	int cpylen;

	if (retlen)
		*retlen = 0;
	while (len) {
		if ((offset+len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;			// this page
		len = len - cpylen;

		//	Get page
		page = page_readahead(mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		/* Skip the dirtying path when the page already holds the data. */
		if (memcmp(page_address(page)+offset, buf, cpylen)) {
			lock_page(page);
			memcpy(page_address(page) + offset, buf, cpylen);
			set_page_dirty(page);
			unlock_page(page);
		}
		page_cache_release(page);

		if (retlen)
			*retlen += cpylen;

		buf += cpylen;
		offset = 0;	/* subsequent pages are written from their start */
		index++;
	}
	return 0;
}
239static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
240		size_t *retlen, const u_char *buf)
241{
242	struct block2mtd_dev *dev = mtd->priv;
243	int err;
244
245	if (!len)
246		return 0;
247	if (to >= mtd->size)
248		return -ENOSPC;
249	if (to + len > mtd->size)
250		len = mtd->size - to;
251
252	down(&dev->write_mutex);
253	err = _block2mtd_write(dev, buf, to, len, retlen);
254	up(&dev->write_mutex);
255	if (err > 0)
256		err = 0;
257	return err;
258}
259
260
261/* sync the device - wait until the write queue is empty */
262static void block2mtd_sync(struct mtd_info *mtd)
263{
264	struct block2mtd_dev *dev = mtd->priv;
265	sync_blockdev(dev->blkdev);
266	return;
267}
268
269
270static void block2mtd_free_device(struct block2mtd_dev *dev)
271{
272	if (!dev)
273		return;
274
275	kfree(dev->mtd.name);
276
277	if (dev->blkdev) {
278		invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
279		close_bdev_excl(dev->blkdev);
280	}
281
282	kfree(dev);
283}
284
285
286/* FIXME: ensure that mtd->size % erase_size == 0 */
287static struct block2mtd_dev *add_device(char *devname, int erase_size)
288{
289	struct block_device *bdev;
290	struct block2mtd_dev *dev;
291
292	if (!devname)
293		return NULL;
294
295	dev = kmalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
296	if (!dev)
297		return NULL;
298	memset(dev, 0, sizeof(*dev));
299
300	/* Get a handle on the device */
301	bdev = open_bdev_excl(devname, O_RDWR, NULL);
302	if (IS_ERR(bdev)) {
303		ERROR("error: cannot open device %s", devname);
304		goto devinit_err;
305	}
306	dev->blkdev = bdev;
307
308	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
309		ERROR("attempting to use an MTD device as a block device");
310		goto devinit_err;
311	}
312
313	init_MUTEX(&dev->write_mutex);
314
315	/* Setup the MTD structure */
316	/* make the name contain the block device in */
317	dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
318			GFP_KERNEL);
319	if (!dev->mtd.name)
320		goto devinit_err;
321
322	sprintf(dev->mtd.name, "block2mtd: %s", devname);
323
324	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
325	dev->mtd.erasesize = erase_size;
326	dev->mtd.type = MTD_RAM;
327	dev->mtd.flags = MTD_CAP_RAM;
328	dev->mtd.erase = block2mtd_erase;
329	dev->mtd.write = block2mtd_write;
330	dev->mtd.writev = default_mtd_writev;
331	dev->mtd.sync = block2mtd_sync;
332	dev->mtd.read = block2mtd_read;
333	dev->mtd.readv = default_mtd_readv;
334	dev->mtd.priv = dev;
335	dev->mtd.owner = THIS_MODULE;
336
337	if (add_mtd_device(&dev->mtd)) {
338		/* Device didnt get added, so free the entry */
339		goto devinit_err;
340	}
341	list_add(&dev->list, &blkmtd_device_list);
342	INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
343			dev->mtd.name + strlen("blkmtd: "),
344			dev->mtd.erasesize >> 10, dev->mtd.erasesize);
345	return dev;
346
347devinit_err:
348	block2mtd_free_device(dev);
349	return NULL;
350}
351
352
/*
 * Parse an unsigned number with an optional binary-size suffix.  The
 * switch cases deliberately fall through so 'G' scales by 1024^3, 'M'
 * by 1024^2 and 'k' by 1024.  A two-character "ki"/"Mi"/"Gi" suffix
 * advances *endp past both characters; a bare "k"/"M"/"G" leaves *endp
 * on the suffix (so parse_num() will reject it).
 * NOTE(review): the result is computed as unsigned long but returned as
 * int, which truncates large values — confirm callers only pass sizes
 * that fit in int.
 */
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);
	switch (**endp) {
	case 'G' :
		result *= 1024;
		/* fallthrough */
	case 'M':
		result *= 1024;
		/* fallthrough */
	case 'k':
		result *= 1024;
	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i')
			(*endp) += 2;
	}
	return result;
}
369
370
371static int parse_num(size_t *num, const char *token)
372{
373	char *endp;
374	size_t n;
375
376	n = (size_t) ustrtoul(token, &endp, 0);
377	if (*endp)
378		return -EINVAL;
379
380	*num = n;
381	return 0;
382}
383
384
385static int parse_name(char **pname, const char *token, size_t limit)
386{
387	size_t len;
388	char *name;
389
390	len = strlen(token) + 1;
391	if (len > limit)
392		return -ENOSPC;
393
394	name = kmalloc(len, GFP_KERNEL);
395	if (!name)
396		return -ENOMEM;
397
398	strcpy(name, token);
399
400	*pname = name;
401	return 0;
402}
403
404
/* Strip a single trailing '\n' from @str, if present. */
static inline void kill_final_newline(char *str)
{
	size_t len = strlen(str);

	if (len && str[len - 1] == '\n')
		str[len - 1] = '\0';
}
411
412
/* Log a module-parameter error and abort parsing.  Module param setters
 * must return 0 here, so errors are log-only.
 * BUGFIX: ERROR() already prepends "block2mtd: " and appends "\n";
 * the old definition double-prefixed and double-newlined every message. */
#define parse_err(fmt, args...) do {	\
	ERROR(fmt, ## args);		\
	return 0;			\
} while (0)
417
418static int block2mtd_setup(const char *val, struct kernel_param *kp)
419{
420	char buf[80+12], *str=buf; /* 80 for device, 12 for erase size */
421	char *token[2];
422	char *name;
423	size_t erase_size = PAGE_SIZE;
424	int i, ret;
425
426	if (strnlen(val, sizeof(buf)) >= sizeof(buf))
427		parse_err("parameter too long");
428
429	strcpy(str, val);
430	kill_final_newline(str);
431
432	for (i=0; i<2; i++)
433		token[i] = strsep(&str, ",");
434
435	if (str)
436		parse_err("too many arguments");
437
438	if (!token[0])
439		parse_err("no argument");
440
441	ret = parse_name(&name, token[0], 80);
442	if (ret == -ENOMEM)
443		parse_err("out of memory");
444	if (ret == -ENOSPC)
445		parse_err("name too long");
446	if (ret)
447		return 0;
448
449	if (token[1]) {
450		ret = parse_num(&erase_size, token[1]);
451		if (ret)
452			parse_err("illegal erase size");
453	}
454
455	add_device(name, erase_size);
456
457	return 0;
458}
459
460
461module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
462MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
463
/* Module init: nothing to set up here — devices are created on demand
 * by the block2mtd= parameter handler.  Just announce the version. */
static int __init block2mtd_init(void)
{
	INFO("version " VERSION);
	return 0;
}
469
470
471static void __devexit block2mtd_exit(void)
472{
473	struct list_head *pos, *next;
474
475	/* Remove the MTD devices */
476	list_for_each_safe(pos, next, &blkmtd_device_list) {
477		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
478		block2mtd_sync(&dev->mtd);
479		del_mtd_device(&dev->mtd);
480		INFO("mtd%d: [%s] removed", dev->mtd.index,
481				dev->mtd.name + strlen("blkmtd: "));
482		list_del(&dev->list);
483		block2mtd_free_device(dev);
484	}
485}
486
487
488module_init(block2mtd_init);
489module_exit(block2mtd_exit);
490
491MODULE_LICENSE("GPL");
492MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
493MODULE_DESCRIPTION("Emulate an MTD using a block device");
494