1/* vmu-flash.c
2 * Driver for SEGA Dreamcast Visual Memory Unit
3 *
4 * Copyright (c) Adrian McMenamin 2002 - 2009
5 * Copyright (c) Paul Mundt 2001
6 *
7 * Licensed under version 2 of the
8 * GNU General Public Licence
9 */
10#include <linux/init.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include <linux/delay.h>
14#include <linux/maple.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/map.h>
17
/* Per-partition single-block read cache, filled by maple_vmu_read_block() */
struct vmu_cache {
	unsigned char *buffer;		/* Cache */
	unsigned int block;		/* Which block was cached */
	unsigned long jiffies_atc;	/* When was it cached? */
	int valid;			/* Non-zero while buffer holds a fresh copy of block */
};
24
/* Stored in mtd_info->priv: links an mtd device back to its maple device */
struct mdev_part {
	struct maple_device *mdev;	/* owning maple bus device */
	int partition;			/* partition index on that device */
};
29
/* Per-partition geometry and state, filled in from the GETMINFO reply */
struct vmupart {
	u16 user_blocks;	/* number of user-accessible blocks */
	u16 root_block;		/* block number of the root block */
	u16 numblocks;		/* total blocks exposed (root_block + 1) */
	char *name;		/* mtd name, "vmu<port>.<unit>.<partition>" */
	struct vmu_cache *pcache;	/* single-block read cache */
};
37
/* Driver-private state for one VMU, stored as maple drvdata */
struct memcard {
	u16 tempA;		/* scratch: user block count from GETMINFO */
	u16 tempB;		/* scratch: root block number from GETMINFO */
	u32 partitions;		/* number of partitions on the card */
	u32 blocklen;		/* block length in bytes */
	u32 writecnt;		/* phases per block write */
	u32 readcnt;		/* phases per block read */
	u32 removeable;		/* removable-media flag from function data */
	int partition;		/* partition currently being queried/set up */
	int read;		/* NOTE(review): not used anywhere in this file */
	unsigned char *blockread;	/* destination for in-flight phased read */
	struct vmupart *parts;	/* array [partitions] of per-partition info */
	struct mtd_info *mtd;	/* array [partitions] of mtd devices */
};
52
/* Result of translating a byte offset into (block, offset-in-block) */
struct vmu_block {
	unsigned int num; /* block number */
	unsigned int ofs; /* block offset */
};
57
58static struct vmu_block *ofs_to_block(unsigned long src_ofs,
59	struct mtd_info *mtd, int partition)
60{
61	struct vmu_block *vblock;
62	struct maple_device *mdev;
63	struct memcard *card;
64	struct mdev_part *mpart;
65	int num;
66
67	mpart = mtd->priv;
68	mdev = mpart->mdev;
69	card = maple_get_drvdata(mdev);
70
71	if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
72		goto failed;
73
74	num = src_ofs / card->blocklen;
75	if (num > card->parts[partition].numblocks)
76		goto failed;
77
78	vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
79	if (!vblock)
80		goto failed;
81
82	vblock->num = num;
83	vblock->ofs = src_ofs % card->blocklen;
84	return vblock;
85
86failed:
87	return NULL;
88}
89
/* Maple bus callback function for reads
 *
 * Runs when a BREAD reply arrives; copies one read phase's worth of
 * payload into the buffer that maple_vmu_read_block() pointed
 * card->blockread at before queueing the packet.
 */
static void vmu_blockread(struct mapleq *mq)
{
	struct maple_device *mdev;
	struct memcard *card;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* copy the read in data */

	/* no destination buffer set up (read aborted or never started) */
	if (unlikely(!card->blockread))
		return;

	/* +12 presumably skips the maple reply header - TODO confirm
	 * against the bus protocol; the payload that follows is one
	 * phase, i.e. blocklen/readcnt bytes */
	memcpy(card->blockread, mq->recvbuf->buf + 12,
		card->blocklen/card->readcnt);

}
107
/* Interface with maple bus to read blocks
 * caching the results so that other parts
 * of the driver can access block reads
 *
 * Reads block @num of the partition behind @mtd into @buf (which must
 * hold at least card->blocklen bytes) and, on success, leaves a copy
 * in the partition's cache.  Returns 0 on success or a negative errno.
 */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error = 0, x, wait;
	unsigned char *blockread = NULL;
	struct vmu_cache *pcache;
	__be32 sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	pcache = card->parts[partition].pcache;
	/* cache is stale until the whole read completes */
	pcache->valid = 0;

	/* prepare the cache for this block */
	if (!pcache->buffer) {
		pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
		if (!pcache->buffer) {
			dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
				" to lack of memory\n", mdev->port,
				mdev->unit);
			error = -ENOMEM;
			goto outB;
		}
	}

	/*
	* Reads may be phased - again the hardware spec
	* supports this - though may not be any devices in
	* the wild that implement it, but we will here
	*/
	for (x = 0; x < card->readcnt; x++) {
		/* request word: partition, read phase and block number */
		sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);

		/* wait up to 1s for any earlier transaction to clear */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				dev_notice(&mdev->dev, "VMU at (%d, %d)"
					" is busy\n", mdev->port, mdev->unit);
				error = -EAGAIN;
				goto outB;
			}
		}

		atomic_set(&mdev->busy, 1);
		/* per-phase bounce buffer filled by vmu_blockread() */
		blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
		if (!blockread) {
			error = -ENOMEM;
			atomic_set(&mdev->busy, 0);
			goto outB;
		}
		card->blockread = blockread;

		maple_getcond_callback(mdev, vmu_blockread, 0,
			MAPLE_FUNC_MEMCARD);
		error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
				MAPLE_COMMAND_BREAD, 2, &sendbuf);
		/* Very long timeouts seem to be needed when box is stressed */
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			(atomic_read(&mdev->busy) == 0 ||
			atomic_read(&mdev->busy) == 2), HZ * 3);
		/*
		* MTD layer does not handle hotplugging well
		* so have to return errors when VMU is unplugged
		* in the middle of a read (busy == 2)
		*/
		if (error || atomic_read(&mdev->busy) == 2) {
			if (atomic_read(&mdev->busy) == 2)
				error = -ENXIO;
			atomic_set(&mdev->busy, 0);
			card->blockread = NULL;
			goto outA;
		}
		if (wait == 0 || wait == -ERESTARTSYS) {
			/* timed out or interrupted: unhook the buffer and
			 * drop the stale packet from the bus queue */
			card->blockread = NULL;
			atomic_set(&mdev->busy, 0);
			error = -EIO;
			list_del_init(&(mdev->mq->list));
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			if (wait == -ERESTARTSYS) {
				dev_warn(&mdev->dev, "VMU read on (%d, %d)"
					" interrupted on block 0x%X\n",
					mdev->port, mdev->unit, num);
			} else
				dev_notice(&mdev->dev, "VMU read on (%d, %d)"
					" timed out on block 0x%X\n",
					mdev->port, mdev->unit, num);
			goto outA;
		}

		/* phase received: copy it to the caller's buffer ... */
		memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
			card->blocklen/card->readcnt);

		/* ... and to the partition cache */
		memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
			card->blockread, card->blocklen/card->readcnt);
		card->blockread = NULL;
		pcache->block = num;
		pcache->jiffies_atc = jiffies;
		pcache->valid = 1;
		/* phase buffer no longer needed once copied */
		kfree(blockread);
	}

	return error;

outA:
	kfree(blockread);
outB:
	return error;
}
226
/* communicate with maple bus for phased writing
 *
 * Writes one whole block (@buf, card->blocklen bytes) as block @num,
 * split over card->writecnt phases.  Returns the number of bytes
 * written (card->blocklen) on success or a negative errno.
 */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error, locking, x, phaselen, wait;
	__be32 *sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	phaselen = card->blocklen/card->writecnt;

	/* one phase of data plus a 4-byte address header */
	sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
	if (!sendbuf) {
		error = -ENOMEM;
		goto fail_nosendbuf;
	}
	for (x = 0; x < card->writecnt; x++) {
		/* header word: partition, write phase and block number */
		sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
		memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
		/* wait until the device is not busy doing something else
		* or 1 second - which ever is longer */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				error = -EBUSY;
				dev_notice(&mdev->dev, "VMU write at (%d, %d)"
					"failed - device is busy\n",
					mdev->port, mdev->unit);
				goto fail_nolock;
			}
		}
		atomic_set(&mdev->busy, 1);

		locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ/10);
		if (locking) {
			error = -EIO;
			atomic_set(&mdev->busy, 0);
			goto fail_nolock;
		}
		/* busy == 2: device unplugged mid-transfer (see read path) */
		if (atomic_read(&mdev->busy) == 2) {
			atomic_set(&mdev->busy, 0);
		} else if (wait == 0 || wait == -ERESTARTSYS) {
			error = -EIO;
			dev_warn(&mdev->dev, "Write at (%d, %d) of block"
				" 0x%X at phase %d failed: could not"
				" communicate with VMU", mdev->port,
				mdev->unit, num, x);
			atomic_set(&mdev->busy, 0);
			/* drop the stale packet from the bus queue */
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			list_del_init(&(mdev->mq->list));
			goto fail_nolock;
		}
	}
	kfree(sendbuf);

	return card->blocklen;

fail_nolock:
	kfree(sendbuf);
fail_nosendbuf:
	dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
		mdev->unit);
	return error;
}
302
303/* mtd function to simulate reading byte by byte */
304static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
305	struct mtd_info *mtd)
306{
307	struct vmu_block *vblock;
308	struct memcard *card;
309	struct mdev_part *mpart;
310	struct maple_device *mdev;
311	unsigned char *buf, ret;
312	int partition, error;
313
314	mpart = mtd->priv;
315	mdev = mpart->mdev;
316	partition = mpart->partition;
317	card = maple_get_drvdata(mdev);
318	*retval =  0;
319
320	buf = kmalloc(card->blocklen, GFP_KERNEL);
321	if (!buf) {
322		*retval = 1;
323		ret = -ENOMEM;
324		goto finish;
325	}
326
327	vblock = ofs_to_block(ofs, mtd, partition);
328	if (!vblock) {
329		*retval = 3;
330		ret = -ENOMEM;
331		goto out_buf;
332	}
333
334	error = maple_vmu_read_block(vblock->num, buf, mtd);
335	if (error) {
336		ret = error;
337		*retval = 2;
338		goto out_vblock;
339	}
340
341	ret = buf[vblock->ofs];
342
343out_vblock:
344	kfree(vblock);
345out_buf:
346	kfree(buf);
347finish:
348	return ret;
349}
350
351/* mtd higher order function to read flash */
352static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
353	size_t *retlen,  u_char *buf)
354{
355	struct maple_device *mdev;
356	struct memcard *card;
357	struct mdev_part *mpart;
358	struct vmu_cache *pcache;
359	struct vmu_block *vblock;
360	int index = 0, retval, partition, leftover, numblocks;
361	unsigned char cx;
362
363	if (len < 1)
364		return -EIO;
365
366	mpart = mtd->priv;
367	mdev = mpart->mdev;
368	partition = mpart->partition;
369	card = maple_get_drvdata(mdev);
370
371	numblocks = card->parts[partition].numblocks;
372	if (from + len > numblocks * card->blocklen)
373		len = numblocks * card->blocklen - from;
374	if (len == 0)
375		return -EIO;
376	/* Have we cached this bit already? */
377	pcache = card->parts[partition].pcache;
378	do {
379		vblock =  ofs_to_block(from + index, mtd, partition);
380		if (!vblock)
381			return -ENOMEM;
382		/* Have we cached this and is the cache valid and timely? */
383		if (pcache->valid &&
384			time_before(jiffies, pcache->jiffies_atc + HZ) &&
385			(pcache->block == vblock->num)) {
386			/* we have cached it, so do necessary copying */
387			leftover = card->blocklen - vblock->ofs;
388			if (vblock->ofs + len - index < card->blocklen) {
389				/* only a bit of this block to copy */
390				memcpy(buf + index,
391					pcache->buffer + vblock->ofs,
392					len - index);
393				index = len;
394			} else {
395				/* otherwise copy remainder of whole block */
396				memcpy(buf + index, pcache->buffer +
397					vblock->ofs, leftover);
398				index += leftover;
399			}
400		} else {
401			/*
402			* Not cached so read one byte -
403			* but cache the rest of the block
404			*/
405			cx = vmu_flash_read_char(from + index, &retval, mtd);
406			if (retval) {
407				*retlen = index;
408				kfree(vblock);
409				return cx;
410			}
411			memset(buf + index, cx, 1);
412			index++;
413		}
414		kfree(vblock);
415	} while (len > index);
416	*retlen = index;
417
418	return 0;
419}
420
/* mtd write: read-modify-write whole blocks covering [to, to + len)
 *
 * Each block is read in, patched with the caller's bytes, and written
 * back; the partition cache is invalidated after every block write.
 * Returns 0 on success with *retlen set, or a negative errno.
 */
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	int index = 0, partition, error = 0, numblocks;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	unsigned char *buffer;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* simple sanity checks */
	if (len < 1) {
		error = -EIO;
		goto failed;
	}
	numblocks = card->parts[partition].numblocks;
	/* clamp the write to the end of the partition */
	if (to + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - to;
	if (len == 0) {
		error = -EIO;
		goto failed;
	}

	vblock = ofs_to_block(to, mtd, partition);
	if (!vblock) {
		error = -ENOMEM;
		goto failed;
	}

	buffer = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto fail_buffer;
	}

	do {
		/* Read in the block we are to write to */
		error = maple_vmu_read_block(vblock->num, buffer, mtd);
		if (error)
			goto fail_io;

		/* patch the caller's bytes into the block image */
		do {
			buffer[vblock->ofs] = buf[index];
			vblock->ofs++;
			index++;
			if (index >= len)
				break;
		} while (vblock->ofs < card->blocklen);

		/* write out new buffer */
		error = maple_vmu_write_block(vblock->num, buffer, mtd);
		/* invalidate the cache */
		pcache = card->parts[partition].pcache;
		pcache->valid = 0;

		/* maple_vmu_write_block returns blocklen on success,
		 * a negative errno otherwise */
		if (error != card->blocklen)
			goto fail_io;

		/* advance to the start of the next block */
		vblock->num++;
		vblock->ofs = 0;
	} while (len > index);

	kfree(buffer);
	*retlen = index;
	kfree(vblock);
	return 0;

fail_io:
	kfree(buffer);
fail_buffer:
	kfree(vblock);
failed:
	dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
	return error;
}
502
/* mtd sync hook - a no-op, since writes go straight to the device */
static void vmu_flash_sync(struct mtd_info *mtd)
{
	/* Nothing to flush */
}
507
/* Maple bus callback function to recursively query hardware details
 *
 * Runs on each GETMINFO reply: records the partition's geometry,
 * registers an mtd device for it, then (for multi-partition cards)
 * queues a GETMINFO for the next partition so this callback fires
 * again.
 */
static void vmu_queryblocks(struct mapleq *mq)
{
	struct maple_device *mdev;
	unsigned short *res;
	struct memcard *card;
	__be32 partnum;
	struct vmu_cache *pcache;
	struct mdev_part *mpart;
	struct mtd_info *mtd_cur;
	struct vmupart *part_cur;
	int error;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* view the reply as 16-bit words; offsets 12 and 6 are taken to
	 * be the user-block count and root-block fields - NOTE(review):
	 * confirm against the Maple GETMINFO reply layout */
	res = (unsigned short *) (mq->recvbuf->buf);
	card->tempA = res[12];
	card->tempB = res[6];

	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
		"blocks with a root block at %d\n", card->partition,
		card->tempA, card->tempB);

	part_cur = &card->parts[card->partition];
	part_cur->user_blocks = card->tempA;
	part_cur->root_block = card->tempB;
	/* the root block is the highest-numbered block, so it also
	 * fixes the partition size */
	part_cur->numblocks = card->tempB + 1;
	/* 12 bytes fits "vmuP.U.NNN" plus NUL for port 0-3, unit 0-5,
	 * up to 256 partitions */
	part_cur->name = kmalloc(12, GFP_KERNEL);
	if (!part_cur->name)
		goto fail_name;

	sprintf(part_cur->name, "vmu%d.%d.%d",
		mdev->port, mdev->unit, card->partition);
	mtd_cur = &card->mtd[card->partition];
	mtd_cur->name = part_cur->name;
	mtd_cur->type = 8;	/* magic number - presumably an MTD type id; TODO use the named constant */
	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
	mtd_cur->size = part_cur->numblocks * card->blocklen;
	mtd_cur->erasesize = card->blocklen;
	mtd_cur->write = vmu_flash_write;
	mtd_cur->read = vmu_flash_read;
	mtd_cur->sync = vmu_flash_sync;
	mtd_cur->writesize = card->blocklen;

	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
	if (!mpart)
		goto fail_mpart;

	mpart->mdev = mdev;
	mpart->partition = card->partition;
	mtd_cur->priv = mpart;
	mtd_cur->owner = THIS_MODULE;

	pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
	if (!pcache)
		goto fail_cache_create;
	part_cur->pcache = pcache;

	error = mtd_device_register(mtd_cur, NULL, 0);
	if (error)
		goto fail_mtd_register;

	/* detach this callback until/unless another query is queued */
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);

	/*
	* Set up a recursive call to the (probably theoretical)
	* second or more partition
	*/
	if (++card->partition < card->partitions) {
		partnum = cpu_to_be32(card->partition << 24);
		maple_getcond_callback(mdev, vmu_queryblocks, 0,
			MAPLE_FUNC_MEMCARD);
		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_GETMINFO, 2, &partnum);
	}
	return;

fail_mtd_register:
	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
		"error is 0x%X\n", mdev->port, mdev->unit, error);
	/* NOTE(review): 'error' is reused as a loop index here, and the
	 * cleanup touches pcache/priv slots of partitions that may never
	 * have been initialised - relies on the arrays being zeroed */
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->parts)[error]).pcache);
		((card->parts)[error]).pcache = NULL;
	}
fail_cache_create:
fail_mpart:
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->mtd)[error]).priv);
		((card->mtd)[error]).priv = NULL;
	}
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);
	kfree(part_cur->name);
fail_name:
	return;
}
605
606/* Handles very basic info about the flash, queries for details */
607static int __devinit vmu_connect(struct maple_device *mdev)
608{
609	unsigned long test_flash_data, basic_flash_data;
610	int c, error;
611	struct memcard *card;
612	u32 partnum = 0;
613
614	test_flash_data = be32_to_cpu(mdev->devinfo.function);
615	/* Need to count how many bits are set - to find out which
616	 * function_data element has details of the memory card
617	 */
618	c = hweight_long(test_flash_data);
619
620	basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
621
622	card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
623	if (!card) {
624		error = -ENOMEM;
625		goto fail_nomem;
626	}
627
628	card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
629	card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
630	card->writecnt = basic_flash_data >> 12 & 0xF;
631	card->readcnt = basic_flash_data >> 8 & 0xF;
632	card->removeable = basic_flash_data >> 7 & 1;
633
634	card->partition = 0;
635
636	/*
637	* Not sure there are actually any multi-partition devices in the
638	* real world, but the hardware supports them, so, so will we
639	*/
640	card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
641		GFP_KERNEL);
642	if (!card->parts) {
643		error = -ENOMEM;
644		goto fail_partitions;
645	}
646
647	card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
648		GFP_KERNEL);
649	if (!card->mtd) {
650		error = -ENOMEM;
651		goto fail_mtd_info;
652	}
653
654	maple_set_drvdata(mdev, card);
655
656	/*
657	* We want to trap meminfo not get cond
658	* so set interval to zero, but rely on maple bus
659	* driver to pass back the results of the meminfo
660	*/
661	maple_getcond_callback(mdev, vmu_queryblocks, 0,
662		MAPLE_FUNC_MEMCARD);
663
664	/* Make sure we are clear to go */
665	if (atomic_read(&mdev->busy) == 1) {
666		wait_event_interruptible_timeout(mdev->maple_wait,
667			atomic_read(&mdev->busy) == 0, HZ);
668		if (atomic_read(&mdev->busy) == 1) {
669			dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
670				mdev->port, mdev->unit);
671			error = -EAGAIN;
672			goto fail_device_busy;
673		}
674	}
675
676	atomic_set(&mdev->busy, 1);
677
678	/*
679	* Set up the minfo call: vmu_queryblocks will handle
680	* the information passed back
681	*/
682	error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
683		MAPLE_COMMAND_GETMINFO, 2, &partnum);
684	if (error) {
685		dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
686			" error is 0x%X\n", mdev->port, mdev->unit, error);
687		goto fail_mtd_info;
688	}
689	return 0;
690
691fail_device_busy:
692	kfree(card->mtd);
693fail_mtd_info:
694	kfree(card->parts);
695fail_partitions:
696	kfree(card);
697fail_nomem:
698	return error;
699}
700
701static void __devexit vmu_disconnect(struct maple_device *mdev)
702{
703	struct memcard *card;
704	struct mdev_part *mpart;
705	int x;
706
707	mdev->callback = NULL;
708	card = maple_get_drvdata(mdev);
709	for (x = 0; x < card->partitions; x++) {
710		mpart = ((card->mtd)[x]).priv;
711		mpart->mdev = NULL;
712		mtd_device_unregister(&((card->mtd)[x]));
713		kfree(((card->parts)[x]).name);
714	}
715	kfree(card->parts);
716	kfree(card->mtd);
717	kfree(card);
718}
719
720/* Callback to handle eccentricities of both mtd subsystem
721 * and general flakyness of Dreamcast VMUs
722 */
723static int vmu_can_unload(struct maple_device *mdev)
724{
725	struct memcard *card;
726	int x;
727	struct mtd_info *mtd;
728
729	card = maple_get_drvdata(mdev);
730	for (x = 0; x < card->partitions; x++) {
731		mtd = &((card->mtd)[x]);
732		if (mtd->usecount > 0)
733			return 0;
734	}
735	return 1;
736}
737
#define ERRSTR "VMU at (%d, %d) file error -"

/* Log a file error reported by the device in human-readable form.
 * NOTE(review): the error word is read straight out of the receive
 * buffer; assumes the bus core delivers it in CPU byte order -
 * confirm against the maple bus driver */
static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
	enum maple_file_errors error = ((int *)recvbuf)[1];

	switch (error) {

	case MAPLE_FILEERR_INVALID_PARTITION:
		dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_PHASE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " phase error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_BLOCK:
		dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_WRITE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " write error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
		dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_BAD_CRC:
		dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
			mdev->port, mdev->unit);
		break;

	default:
		/* unrecognised code - log it raw */
		dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
			mdev->port, mdev->unit, error);
	}
}
781
782
783static int __devinit probe_maple_vmu(struct device *dev)
784{
785	int error;
786	struct maple_device *mdev = to_maple_dev(dev);
787	struct maple_driver *mdrv = to_maple_driver(dev->driver);
788
789	mdev->can_unload = vmu_can_unload;
790	mdev->fileerr_handler = vmu_file_error;
791	mdev->driver = mdrv;
792
793	error = vmu_connect(mdev);
794	if (error)
795		return error;
796
797	return 0;
798}
799
800static int __devexit remove_maple_vmu(struct device *dev)
801{
802	struct maple_device *mdev = to_maple_dev(dev);
803
804	vmu_disconnect(mdev);
805	return 0;
806}
807
/* Maple bus driver matching any device exposing the MEMCARD function */
static struct maple_driver vmu_flash_driver = {
	.function =	MAPLE_FUNC_MEMCARD,
	.drv = {
		.name =		"Dreamcast_visual_memory",
		.probe =	probe_maple_vmu,
		.remove = 	__devexit_p(remove_maple_vmu),
	},
};
816
817static int __init vmu_flash_map_init(void)
818{
819	return maple_driver_register(&vmu_flash_driver);
820}
821
/* Module exit: detach the driver from the maple bus */
static void __exit vmu_flash_map_exit(void)
{
	maple_driver_unregister(&vmu_flash_driver);
}
826
/* Standard module entry/exit points and metadata */
module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");
833