/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>


/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
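/*
 * Exporting the id table with MODULE_DEVICE_TABLE() lets depmod/udev
 * autoload this module when a matching device is plugged in.
 */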
MODULE_DEVICE_TABLE(usb, skel_table);


/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/* MAX_TRANSFER is chosen so that the VM is not stressed by
   allocations > PAGE_SIZE and the number of packets in a page
   is an integer; 512 is the largest possible packet on EHCI */
#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */

/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char           *bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	bool			processed_urb;		/* set once the completed urb has been handled */
	spinlock_t		err_lock;		/* lock for errors */
	struct kref		kref;
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	struct completion	bulk_in_completion;	/* to wait for an ongoing read */
};
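/* recover our usb_skel structure from the embedded kref (used by skel_delete()) */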
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)

static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);

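/*
 * skel_delete() runs once the last reference to the device is dropped,
 * i.e. on the final close() after a disconnect or on an error in probe().
 */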
static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	usb_put_dev(dev->udev);
	kfree(dev->bulk_in_buffer);
	kfree(dev);
}

static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		err("%s - error, can't find device for minor %d",
		     __func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* lock the device to allow correctly handling errors
	 * in resumption */
	mutex_lock(&dev->io_mutex);

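	/* take a runtime-PM reference so the device is not autosuspended
	 * while the file is open; dropped again in skel_release() */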
	retval = usb_autopm_get_interface(interface);
	if (retval)
		goto out_err;

	/* save our object in the file's private structure */
	file->private_data = dev;

out_err:
	mutex_unlock(&dev->io_mutex);

exit:
	return retval;
}

static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* allow the device to be autosuspended */
	mutex_lock(&dev->io_mutex);
	if (dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}

static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev;
	int res;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}

static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

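	/* the completion handler runs in interrupt context, so a plain
	 * spin_lock() is enough here; process context takes this lock
	 * with spin_lock_irq() */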
	spin_lock(&dev->err_lock);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			err("%s - nonzero read bulk status received: %d",
			    __func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock(&dev->err_lock);

	complete(&dev->bulk_in_completion);
}

static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev,
				dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		err("%s - failed submitting read urb, error %d",
			__func__, rv);
		dev->bulk_in_filled = 0;
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}

static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

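	/*
	 * Reads are satisfied from bulk_in_buffer while it still holds
	 * unread data; otherwise a new bulk-in urb is submitted and we
	 * either wait for it (blocking) or return -EAGAIN (non-blocking).
	 */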
	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_for_completion_interruptible(&dev->bulk_in_completion);
		if (rv < 0)
			goto exit;
		/*
		 * by waiting we have also partially processed the urb;
		 * we must finish the job now
		 */
		dev->bulk_in_copied = 0;
		dev->processed_urb = 1;
	}

	if (!dev->processed_urb) {
		/*
		 * the URB hasn't been processed
		 * do it now
		 */
		wait_for_completion(&dev->bulk_in_completion);
		dev->bulk_in_copied = 0;
		dev->processed_urb = 1;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* no data to deliver */
		dev->bulk_in_filled = 0;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else if (!(file->f_flags & O_NONBLOCK))
			goto retry;
		rv = -EAGAIN;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}

static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			err("%s - nonzero write bulk status received: %d",
			    __func__, urb->status);

		spin_lock(&dev->err_lock);
		dev->errors = urb->status;
		spin_unlock(&dev->err_lock);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
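	/* release one WRITES_IN_FLIGHT slot so a writer blocked in
	 * skel_write() may submit another urb */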
	up(&dev->limit_sem);
}

static ssize_t skel_write(struct file *file, const char __user *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
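	/* the buffer came from usb_alloc_coherent(), so hand the HCD its DMA
	 * address directly, and anchor the urb so that disconnect() and
	 * skel_draw_down() can cancel it if need be */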
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		err("%s - failed submitting write urb, error %d", __func__,
		    retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}

static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};

/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
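 * The %d in .name is filled in by usb_register_dev() with the device's minor
 * offset, so device nodes typically show up as /dev/skel0, /dev/skel1, ...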
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};

static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	size_t buffer_size;
	int i;
	int retval = -ENOMEM;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		err("Out of memory");
		goto error;
	}
	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_completion(&dev->bulk_in_completion);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (!dev->bulk_in_endpointAddr &&
		    usb_endpoint_is_bulk_in(endpoint)) {
			/* we found a bulk in endpoint */
			buffer_size = usb_endpoint_maxp(endpoint);
			dev->bulk_in_size = buffer_size;
			dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
			dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
			if (!dev->bulk_in_buffer) {
				err("Could not allocate bulk_in_buffer");
				goto error;
			}
			dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!dev->bulk_in_urb) {
				err("Could not allocate bulk_in_urb");
				goto error;
			}
		}

		if (!dev->bulk_out_endpointAddr &&
		    usb_endpoint_is_bulk_out(endpoint)) {
			/* we found a bulk out endpoint */
			dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
		}
	}
	if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
		err("Could not find both bulk-in and bulk-out endpoints");
		goto error;
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		err("Not able to get a minor for this device.");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;

error:
	if (dev)
		/* this frees allocated memory */
		kref_put(&dev->kref, skel_delete);
	return retval;
}

static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);

	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}

static void skel_draw_down(struct usb_skel *dev)
{
	int time;

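	/* give outstanding writes up to a second to drain, then forcibly
	 * cancel anything still anchored, plus any in-flight read urb */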
	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}

static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (!dev)
		return 0;
	skel_draw_down(dev);
	return 0;
}

static int skel_resume(struct usb_interface *intf)
{
	return 0;
}

static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

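	/* io_mutex is taken here and released in skel_post_reset(), so no
	 * new I/O can be submitted while the device is being reset */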
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}

static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,
};

module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");