block2mtd.c revision 0ffb74ccc06a112042adfaf8229684b78202bcae
/*
 * $Id: block2mtd.c,v 1.30 2005/11/29 14:48:32 gleixner Exp $
 *
 * block2mtd.c - create an mtd from a block device
 *
 * Copyright (C) 2001,2002	Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004-2006	Jörn Engel <joern@wh.fh-wedel.de>
 *
 * Licence: GPL
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>

#define VERSION "$Revision: 1.30 $"


#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)


/* Info for the block device */
struct block2mtd_dev {
	struct list_head list;
	struct block_device *blkdev;
	struct mtd_info mtd;
	struct mutex write_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);


#define PAGE_READAHEAD 64
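/* Populate the page cache ahead of time: allocate up to PAGE_READAHEAD
 * pages starting at @index (stopping early if the first page is already
 * cached, and skipping any others that are) and hand them to
 * read_cache_pages() to be read via the mapping's readpage method. */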
static void cache_readahead(struct address_space *mapping, int index)
{
	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
	int i, pagei;
	unsigned ret = 0;
	unsigned long end_index;
	struct page *page;
	LIST_HEAD(page_pool);
	struct inode *inode = mapping->host;
	loff_t isize = i_size_read(inode);

	if (!isize) {
		INFO("iSize=0 in cache_readahead");
		return;
	}

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	read_lock_irq(&mapping->tree_lock);
	for (i = 0; i < PAGE_READAHEAD; i++) {
		pagei = index + i;
		if (pagei > end_index) {
			INFO("Overrun end of disk in cache readahead");
			break;
		}
		page = radix_tree_lookup(&mapping->page_tree, pagei);
		if (page && (!i))
			break;
		if (page)
			continue;
		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = pagei;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);
	if (ret)
		read_cache_pages(mapping, &page_pool, filler, NULL);
}

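/* Read one page through the page cache, priming a readahead window
 * behind it first so that subsequent sequential accesses hit the cache. */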
static struct page* page_readahead(struct address_space *mapping, int index)
{
	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
	cache_readahead(mapping, index);
	return read_cache_page(mapping, index, filler, NULL);
}

/* erase a specified part of the device */
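/* Note: erase is emulated by filling the region with 0xff through the
 * block device's page cache.  Pages that already read back as all 0xff
 * are left alone so they are not needlessly marked dirty. */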
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	struct page *page;
	int index = to >> PAGE_SHIFT;	// page index
	int pages = len >> PAGE_SHIFT;
	u_long *p;
	u_long *max;

	while (pages) {
		page = page_readahead(mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		max = page_address(page) + PAGE_SIZE;
		for (p=page_address(page); p<max; p++)
			if (*p != -1UL) {
				lock_page(page);
				memset(page_address(page), 0xff, PAGE_SIZE);
				set_page_dirty(page);
				unlock_page(page);
				break;
			}

		page_cache_release(page);
		pages--;
		index++;
	}
	return 0;
}


static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct block2mtd_dev *dev = mtd->priv;
	size_t from = instr->addr;
	size_t len = instr->len;
	int err;

	instr->state = MTD_ERASING;
	mutex_lock(&dev->write_mutex);
	err = _block2mtd_erase(dev, from, len);
	mutex_unlock(&dev->write_mutex);
	if (err) {
		ERROR("erase failed err = %d", err);
		instr->state = MTD_ERASE_FAILED;
	} else
		instr->state = MTD_ERASE_DONE;

	mtd_erase_callback(instr);
	return err;
}

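/* Read from the device: bounds-check the request and copy the data out of
 * the block device's page cache, faulting pages in via page_readahead()
 * as needed. */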
static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	struct page *page;
	int index = from >> PAGE_SHIFT;
	int offset = from & (PAGE_SIZE-1);
	int cpylen;

	if (from > mtd->size)
		return -EINVAL;
	if (from + len > mtd->size)
		len = mtd->size - from;

	if (retlen)
		*retlen = 0;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		// Get page
		page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(buf, page_address(page) + offset, cpylen);
		page_cache_release(page);

		if (retlen)
			*retlen += cpylen;
		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}

/* write data to the underlying device */
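/* Data is staged in the block device's page cache; a page is only marked
 * dirty when the new contents actually differ from what is already there,
 * and the real disk I/O is left to the kernel's normal writeback. */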
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
		loff_t to, size_t len, size_t *retlen)
{
	struct page *page;
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	int index = to >> PAGE_SHIFT;	// page index
	int offset = to & ~PAGE_MASK;	// page offset
	int cpylen;

	if (retlen)
		*retlen = 0;
	while (len) {
		if ((offset+len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;			// this page
		len = len - cpylen;

		// Get page
		page = page_readahead(mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (memcmp(page_address(page)+offset, buf, cpylen)) {
			lock_page(page);
			memcpy(page_address(page) + offset, buf, cpylen);
			set_page_dirty(page);
			unlock_page(page);
		}
		page_cache_release(page);

		if (retlen)
			*retlen += cpylen;

		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}

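/* MTD write handler: clamp the request to the device size and serialize
 * with other writers and with erase via write_mutex before handing off
 * to _block2mtd_write(). */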
static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	int err;

	if (!len)
		return 0;
	if (to >= mtd->size)
		return -ENOSPC;
	if (to + len > mtd->size)
		len = mtd->size - to;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_write(dev, buf, to, len, retlen);
	mutex_unlock(&dev->write_mutex);
	if (err > 0)
		err = 0;
	return err;
}


/* sync the device - wait until the write queue is empty */
static void block2mtd_sync(struct mtd_info *mtd)
{
	struct block2mtd_dev *dev = mtd->priv;
	sync_blockdev(dev->blkdev);
	return;
}


static void block2mtd_free_device(struct block2mtd_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->mtd.name);

	if (dev->blkdev) {
		invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
					0, -1);
		close_bdev_excl(dev->blkdev);
	}

	kfree(dev);
}

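/* Open the named block device exclusively, build an mtd_info backed by it
 * and register the result.  Returns NULL on any failure. */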
/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
	struct block_device *bdev;
	struct block2mtd_dev *dev;

	if (!devname)
		return NULL;

	dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* Get a handle on the device */
	bdev = open_bdev_excl(devname, O_RDWR, NULL);
#ifndef MODULE
	if (IS_ERR(bdev)) {

		/* We might not have rootfs mounted at this point. Try
		   to resolve the device name by other means. */

		dev_t devt = name_to_dev_t(devname);
		if (devt != 0) {
			bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
		}
	}
#endif

	if (IS_ERR(bdev)) {
		ERROR("error: cannot open device %s", devname);
		goto devinit_err;
	}
	dev->blkdev = bdev;

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		ERROR("attempting to use an MTD device as a block device");
		goto devinit_err;
	}

	mutex_init(&dev->write_mutex);

	/* Setup the MTD structure */
	/* make the name contain the block device it is based on */
	dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
			GFP_KERNEL);
	if (!dev->mtd.name)
		goto devinit_err;

	sprintf(dev->mtd.name, "block2mtd: %s", devname);

	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
	dev->mtd.erasesize = erase_size;
	dev->mtd.writesize = 1;
	dev->mtd.type = MTD_RAM;
	dev->mtd.flags = MTD_CAP_RAM;
	dev->mtd.erase = block2mtd_erase;
	dev->mtd.write = block2mtd_write;
	dev->mtd.writev = default_mtd_writev;
	dev->mtd.sync = block2mtd_sync;
	dev->mtd.read = block2mtd_read;
	dev->mtd.priv = dev;
	dev->mtd.owner = THIS_MODULE;

	if (add_mtd_device(&dev->mtd)) {
		/* Device didn't get added, so free the entry */
		goto devinit_err;
	}
	list_add(&dev->list, &blkmtd_device_list);
	INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
			dev->mtd.name + strlen("block2mtd: "),
			dev->mtd.erasesize >> 10, dev->mtd.erasesize);
	return dev;

devinit_err:
	block2mtd_free_device(dev);
	return NULL;
}

/* This function works like the regular simple_strtoul.  In addition, it
 * allows some suffixes for a more human-readable number format:
 * ki, Ki, kiB, KiB	- multiply result by 1024
 * Mi, MiB		- multiply result by 1024^2
 * Gi, GiB		- multiply result by 1024^3
 */
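/* For example, "64ki" and "64KiB" both parse to 65536, and "4Mi" parses
 * to 4194304.  A bare "64k" is rejected later by parse_num() because the
 * suffix is not consumed. */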
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);
	switch (**endp) {
	case 'G' :
		result *= 1024;
	case 'M':
		result *= 1024;
	case 'K':
	case 'k':
		result *= 1024;
	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i') {
			if ((*endp)[2] == 'B')
				(*endp) += 3;
			else
				(*endp) += 2;
		}
	}
	return result;
}

static int parse_num(size_t *num, const char *token)
{
	char *endp;
	size_t n;

	n = (size_t) ustrtoul(token, &endp, 0);
	if (*endp)
		return -EINVAL;

	*num = n;
	return 0;
}


static inline void kill_final_newline(char *str)
{
	char *newline = strrchr(str, '\n');
	if (newline && !newline[1])
		*newline = 0;
}


#define parse_err(fmt, args...) do {	\
	ERROR(fmt, ## args);		\
	return 0;			\
} while (0)

#ifndef MODULE
static int block2mtd_init_called = 0;
static __initdata char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
#endif


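/* Parse a single "<dev>[,<erasesize>]" option string and register the
 * corresponding device; the erase size defaults to PAGE_SIZE. */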
static int block2mtd_setup2(const char *val)
{
	char buf[80 + 12]; /* 80 for device, 12 for erase size */
	char *str = buf;
	char *token[2];
	char *name;
	size_t erase_size = PAGE_SIZE;
	int i, ret;

	if (strnlen(val, sizeof(buf)) >= sizeof(buf))
		parse_err("parameter too long");

	strcpy(str, val);
	kill_final_newline(str);

	for (i = 0; i < 2; i++)
		token[i] = strsep(&str, ",");

	if (str)
		parse_err("too many arguments");

	if (!token[0])
		parse_err("no argument");

	name = token[0];
	if (strlen(name) + 1 > 80)
		parse_err("device name too long");

	if (token[1]) {
		ret = parse_num(&erase_size, token[1]);
		if (ret)
			parse_err("illegal erase size");
	}

	add_device(name, erase_size);

	return 0;
}


static int block2mtd_setup(const char *val, struct kernel_param *kp)
{
#ifdef MODULE
	return block2mtd_setup2(val);
#else
	/* If more parameters are later passed in via
	   /sys/module/block2mtd/parameters/block2mtd
	   and block2mtd_init() has already been called,
	   we can parse the argument now. */

	if (block2mtd_init_called)
		return block2mtd_setup2(val);

	/* During the early boot stage, we only save the parameters
	   here.  We must parse them later: if the parameter is passed
	   on the kernel boot command line, block2mtd_setup() is
	   called so early that it is not possible to resolve
	   the device (even kmalloc() fails).  Defer that work to
	   block2mtd_setup2(). */

	strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));

	return 0;
#endif
}


module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
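/* Typical usage (the device path below is only an example):
 *   as a module:  modprobe block2mtd block2mtd=/dev/sda1,64ki
 *   built in:     boot with block2mtd.block2mtd=/dev/sda1,64ki
 * The erase size suffix is parsed by ustrtoul() above. */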

static int __init block2mtd_init(void)
{
	int ret = 0;
	INFO("version " VERSION);

#ifndef MODULE
	if (strlen(block2mtd_paramline))
		ret = block2mtd_setup2(block2mtd_paramline);
	block2mtd_init_called = 1;
#endif

	return ret;
}

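/* Module teardown: sync, unregister and release every device that was
 * created by add_device(). */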
static void __exit block2mtd_exit(void)
{
	struct list_head *pos, *next;

	/* Remove the MTD devices */
	list_for_each_safe(pos, next, &blkmtd_device_list) {
		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
		block2mtd_sync(&dev->mtd);
		del_mtd_device(&dev->mtd);
		INFO("mtd%d: [%s] removed", dev->mtd.index,
				dev->mtd.name + strlen("block2mtd: "));
		list_del(&dev->list);
		block2mtd_free_device(dev);
	}
}


module_init(block2mtd_init);
module_exit(block2mtd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
