/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *     Copyright 2004 Motorola Inc.
 *
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>

#include "../vme.h"
#include "vme_user.h"

static DEFINE_MUTEX(vme_user_mutex);
static const char driver_name[] = "vme_user";

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 *		It is expected that all VME bus drivers will use the
 *		same interface.  For interface documentation see
 *		http://www.vmelinux.org/.
 *
 * However, the VME driver at http://www.vmelinux.org/ is rather old and
 * doesn't even support the tsi148 chipset (which has 8 master and 8 slave
 * windows). We'll run with this for now as far as possible, however it
 * probably makes sense to get rid of the old mappings and just do everything
 * dynamically.
 *
 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves
 * as defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/; an alternative driver providing a saner interface
 * can be written later.
 *
 * The vmelinux.org driver never supported slave images; the devices reserved
 * for slaves were repurposed to support all 8 master images on the
 * UniverseII! We shall support 4 masters and 4 slaves with this driver.
 */
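
/*
 * Illustrative userspace usage (a sketch only, not part of this driver).
 * The path assumes the "bus/vme/m%d", "bus/vme/s%d" and "bus/vme/ctl" names
 * created in vme_user_probe() below, surfaced by udev under /dev/bus/vme/.
 * A small read through the first master window, once the window has been
 * configured with the VME_SET_MASTER ioctl (see further down):
 *
 *	int fd = open("/dev/bus/vme/m0", O_RDWR);
 *	char data[256];
 *
 *	if (fd >= 0) {
 *		lseek(fd, 0x100, SEEK_SET);
 *		read(fd, data, sizeof(data));
 *		close(fd);
 *	}
 */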
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE  0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct semaphore sem;	/* Semaphore for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;		/* Number of current users */
};
static struct image_desc image[VME_DEVS];

struct driver_stats {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long irqs;
	unsigned long berrs;
	unsigned long dmaErrors;
	unsigned long timeouts;
	unsigned long external;
};
static struct driver_stats statistics;

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */


static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};


static int vme_user_open(struct inode *, struct file *);
static int vme_user_release(struct inode *, struct file *);
static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vme_user_write(struct file *, const char __user *, size_t,
	loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);

static int vme_user_match(struct vme_dev *);
static int __devinit vme_user_probe(struct vme_dev *);
static int __devexit vme_user_remove(struct vme_dev *);

static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
};


/*
 * Reset all the statistics counters
 */
static void reset_counters(void)
{
	statistics.reads = 0;
	statistics.writes = 0;
	statistics.ioctls = 0;
	statistics.irqs = 0;
	statistics.berrs = 0;
	statistics.dmaErrors = 0;
	statistics.timeouts = 0;
	statistics.external = 0;
}

static int vme_user_open(struct inode *inode, struct file *file)
{
	int err;
	unsigned int minor = MINOR(inode->i_rdev);

	down(&image[minor].sem);
	/* Only allow the device to be opened if any needed resource exists */
	if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
		printk(KERN_ERR "No resources allocated for device\n");
		err = -EINVAL;
		goto err_res;
	}

	/* Increment user count */
	image[minor].users++;

	up(&image[minor].sem);

	return 0;

err_res:
	up(&image[minor].sem);

	return err;
}

static int vme_user_release(struct inode *inode, struct file *file)
{
	unsigned int minor = MINOR(inode->i_rdev);

	down(&image[minor].sem);

	/* Decrement user count */
	image[minor].users--;

	up(&image[minor].sem);

	return 0;
}

/*
 * We allocate a bounce buffer per window at probe time for small transfers.
 * Small transfers will go VME -> buffer -> user space. Larger (bigger than
 * the bounce buffer) transfers are intended to lock the user space buffer
 * into memory and transfer the data directly into the user space buffers,
 * however that path is not yet implemented.
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
	loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
			image[minor].kern_buf, count, *ppos);
		if (copied < 0)
			return (int)copied;

		retval = __copy_to_user(buf, image[minor].kern_buf,
			(unsigned long)copied);
		if (retval != 0) {
			copied = (copied - retval);
			printk(KERN_INFO "User copy failed\n");
			return -EINVAL;
		}

	} else {
		/* XXX Need to write this */
		printk(KERN_INFO "Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}

/*
 * We allocate a bounce buffer per window at probe time for small transfers.
 * Small transfers will go user space -> buffer -> VME. Larger (bigger than
 * the bounce buffer) transfers are intended to lock the user space buffer
 * into memory and transfer the data directly from the user space buffers out
 * to VME, however that path is not yet implemented.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
			(unsigned long)count);
		if (retval != 0)
			copied = (copied - retval);
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
			image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		printk(KERN_INFO "Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}
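
/*
 * Until the large-transfer path above is implemented, any master window read
 * or write bigger than the per-window bounce buffer (PCI_BUF_SIZE) is
 * rejected with -EINVAL, so a userspace caller has to split its transfer.
 * A minimal sketch of chunked reading (illustrative only, error handling
 * trimmed; CHUNK must match PCI_BUF_SIZE):
 *
 *	size_t done = 0;
 *
 *	while (done < total) {
 *		size_t len = total - done;
 *		ssize_t rc;
 *
 *		if (len > CHUNK)
 *			len = CHUNK;
 *		rc = pread(fd, buf + done, len, offset + done);
 *		if (rc <= 0)
 *			break;
 *		done += rc;
 *	}
 */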

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		printk(KERN_WARNING "Partial copy to userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully read */
	return retval;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		printk(KERN_WARNING "Partial copy from userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully written */
	return retval;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	down(&image[minor].sem);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		up(&image[minor].sem);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	up(&image[minor].sem);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	down(&image[minor].sem);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		up(&image[minor].sem);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	up(&image[minor].sem);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	loff_t absolute = -1;
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return -EINVAL;

	down(&image[minor].sem);
	image_size = vme_get_size(image[minor].resource);

	switch (whence) {
	case SEEK_SET:
		absolute = off;
		break;
	case SEEK_CUR:
		absolute = file->f_pos + off;
		break;
	case SEEK_END:
		absolute = image_size + off;
		break;
	default:
		up(&image[minor].sem);
		return -EINVAL;
	}

	if ((absolute < 0) || (absolute >= image_size)) {
		up(&image[minor].sem);
		return -EINVAL;
	}

	file->f_pos = absolute;

	up(&image[minor].sem);

	return absolute;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either; these
 * are also quite low level. However, we should reuse the definitions that
 * have already been established where we can.
 */
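
/*
 * Illustrative userspace configuration of a master window (a sketch only;
 * it assumes the VME_SET_MASTER request and struct vme_master layout from
 * vme_user.h and the VME_A32/VME_SCT/VME_D32 constants from vme.h; the
 * address and size values are made up):
 *
 *	struct vme_master cfg = {
 *		.enable   = 1,
 *		.vme_addr = 0x20000000,
 *		.size     = 0x10000,
 *		.aspace   = VME_A32,
 *		.cycle    = VME_SCT,
 *		.dwidth   = VME_D32,
 *	};
 *
 *	if (ioctl(fd, VME_SET_MASTER, &cfg))
 *		perror("VME_SET_MASTER");
 */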
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	statistics.ioctls++;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(struct vme_irq_id));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from userspace\n");
				return -EFAULT;
			}

			retval = vme_irq_generate(vme_user_bridge,
						  irq_req.level,
						  irq_req.statid);

			return retval;
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&master.enable, &master.vme_addr,
				&master.size, &master.aspace,
				&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
				sizeof(struct vme_master));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy to "
					"userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from "
					"userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&slave.enable, &slave.vme_addr,
				&slave.size, &pci_addr, &slave.aspace,
				&slave.cycle);

			copied = copy_to_user(argp, &slave,
				sizeof(struct vme_slave));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy to "
					"userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from "
					"userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);
		}
		break;
	}

	return -EINVAL;
}

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&vme_user_mutex);
	ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
	mutex_unlock(&vme_user_mutex);

	return ret;
}


/*
 * Free a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		printk(KERN_DEBUG "UniverseII: Releasing buffer at %llx\n",
			(unsigned long long)image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource, image[num].size_buf,
			image[num].kern_buf, image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

#ifdef VME_DEBUG
	} else {
		printk(KERN_DEBUG "UniverseII: Buffer not allocated\n");
#endif
	}
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = __devexit_p(vme_user_remove),
};


static int __init vme_user_init(void)
{
	int retval = 0;

	printk(KERN_INFO "VME User Space Access Driver\n");

	if (bus_num == 0) {
		printk(KERN_ERR "%s: No cards, skipping registration\n",
			driver_name);
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus; we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		printk(KERN_ERR "%s: Driver only able to handle %d buses\n",
			driver_name, VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval != 0)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

static int vme_user_match(struct vme_dev *vdev)
{
	if (vdev->num >= VME_USER_BUS_MAX)
		return 0;
	return 1;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int __devinit vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char name[12];

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		printk(KERN_ERR "%s: Driver can only be loaded for 1 device\n",
			driver_name);
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		sema_init(&image[i].sem, 1);
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Initialise statistics counters */
	reset_counters();

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
		driver_name);
	if (err) {
		printk(KERN_WARNING "%s: Error getting Major Number %d for "
		"driver.\n", driver_name, VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (vme_user_cdev == NULL) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		printk(KERN_WARNING "%s: cdev_add failed\n", driver_name);
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128kB each) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For the ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24, which is
		 * supported by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			printk(KERN_WARNING "Unable to allocate slave "
				"resource\n");
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &image[i].pci_buf);
		if (image[i].kern_buf == NULL) {
			printk(KERN_WARNING "Unable to allocate memory for "
				"buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate buffers for small reads
	 * and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (image[i].resource == NULL) {
			printk(KERN_WARNING "Unable to allocate master "
				"resource\n");
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			printk(KERN_WARNING "Unable to allocate memory for "
				"master window buffers\n");
			err = -ENOMEM;
			goto err_master_buf;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		printk(KERN_ERR "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;
		switch (type[i]) {
		case MASTER_MINOR:
			sprintf(name, "bus/vme/m%%d");
			break;
		case CONTROL_MINOR:
			sprintf(name, "bus/vme/ctl");
			break;
		case SLAVE_MINOR:
			sprintf(name, "bus/vme/s%%d");
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
					MKDEV(VME_MAJOR, i), NULL, name, num);
		if (IS_ERR(image[i].device)) {
			printk(KERN_INFO "%s: Error creating sysfs device\n",
				driver_name);
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

	/* Ensure counter set correctly to destroy all sysfs devices */
	i = VME_DEVS;
err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master_buf:
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
		kfree(image[i].kern_buf);
err_master:
	while (i > MASTER_MINOR) {
		i--;
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and
	 * buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static int __devexit vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs entries */
	for (i = 0; i < VME_DEVS; i++)
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}


MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);
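
/*
 * The driver refuses to register when no bus is given (bus_num stays 0 in
 * vme_user_init() above), so a bus number must be passed at load time.
 * Hypothetical example:
 *
 *	modprobe vme_user bus=0
 *
 * Up to VME_USER_BUS_MAX bus numbers may be listed, but only the first
 * matching device is actually used (see vme_user_match() and the
 * single-device check in vme_user_probe()).
 */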

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);