1/*
2 * f_fs.c -- user mode file system API for USB composite function controllers
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 * Author: Michal Nazarewicz <mina86@mina86.com>
6 *
7 * Based on inode.c (GadgetFS) which was:
8 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/blkdev.h>
22#include <linux/pagemap.h>
23#include <linux/export.h>
24#include <linux/hid.h>
25#include <linux/module.h>
26#include <asm/unaligned.h>
27
28#include <linux/usb/composite.h>
29#include <linux/usb/functionfs.h>
30
31#include <linux/aio.h>
32#include <linux/mmu_context.h>
33#include <linux/poll.h>
34
35#include "u_fs.h"
36#include "u_f.h"
37#include "u_os_desc.h"
38#include "configfs.h"
39
#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
41
42/* Reference counter handling */
43static void ffs_data_get(struct ffs_data *ffs);
44static void ffs_data_put(struct ffs_data *ffs);
45/* Creates new ffs_data object. */
46static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
47
48/* Opened counter handling. */
49static void ffs_data_opened(struct ffs_data *ffs);
50static void ffs_data_closed(struct ffs_data *ffs);
51
52/* Called with ffs->mutex held; take over ownership of data. */
53static int __must_check
54__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
55static int __must_check
56__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
57
58
59/* The function structure ***************************************************/
60
61struct ffs_ep;
62
63struct ffs_function {
64	struct usb_configuration	*conf;
65	struct usb_gadget		*gadget;
66	struct ffs_data			*ffs;
67
68	struct ffs_ep			*eps;
69	u8				eps_revmap[16];
70	short				*interfaces_nums;
71
72	struct usb_function		function;
73};
74
75
76static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
77{
78	return container_of(f, struct ffs_function, function);
79}
80
81
82static inline enum ffs_setup_state
83ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
84{
85	return (enum ffs_setup_state)
86		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
87}
88
89
90static void ffs_func_eps_disable(struct ffs_function *func);
91static int __must_check ffs_func_eps_enable(struct ffs_function *func);
92
93static int ffs_func_bind(struct usb_configuration *,
94			 struct usb_function *);
95static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
96static void ffs_func_disable(struct usb_function *);
97static int ffs_func_setup(struct usb_function *,
98			  const struct usb_ctrlrequest *);
99static void ffs_func_suspend(struct usb_function *);
100static void ffs_func_resume(struct usb_function *);
101
102
103static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
104static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
105
106
107/* The endpoints structures *************************************************/
108
109struct ffs_ep {
110	struct usb_ep			*ep;	/* P: ffs->eps_lock */
111	struct usb_request		*req;	/* P: epfile->mutex */
112
113	/* [0]: full speed, [1]: high speed, [2]: super speed */
114	struct usb_endpoint_descriptor	*descs[3];
115
116	u8				num;
117
118	int				status;	/* P: epfile->mutex */
119};
120
121struct ffs_epfile {
122	/* Protects ep->ep and ep->req. */
123	struct mutex			mutex;
124	wait_queue_head_t		wait;
125
126	struct ffs_data			*ffs;
127	struct ffs_ep			*ep;	/* P: ffs->eps_lock */
128
129	struct dentry			*dentry;
130
131	char				name[5];
132
133	unsigned char			in;	/* P: ffs->eps_lock */
134	unsigned char			isoc;	/* P: ffs->eps_lock */
135
136	unsigned char			_pad;
137};
138
139/*  ffs_io_data structure ***************************************************/
140
141struct ffs_io_data {
142	bool aio;
143	bool read;
144
145	struct kiocb *kiocb;
146	const struct iovec *iovec;
147	unsigned long nr_segs;
148	char __user *buf;
149	size_t len;
150
151	struct mm_struct *mm;
152	struct work_struct work;
153
154	struct usb_ep *ep;
155	struct usb_request *req;
156};
157
158struct ffs_desc_helper {
159	struct ffs_data *ffs;
160	unsigned interfaces_count;
161	unsigned eps_count;
162};
163
164static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
165static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
166
167static struct dentry *
168ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
169		   const struct file_operations *fops);
170
171/* Devices management *******************************************************/
172
173DEFINE_MUTEX(ffs_lock);
174EXPORT_SYMBOL_GPL(ffs_lock);
175
176static struct ffs_dev *_ffs_find_dev(const char *name);
177static struct ffs_dev *_ffs_alloc_dev(void);
178static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
179static void _ffs_free_dev(struct ffs_dev *dev);
180static void *ffs_acquire_dev(const char *dev_name);
181static void ffs_release_dev(struct ffs_data *ffs_data);
182static int ffs_ready(struct ffs_data *ffs);
183static void ffs_closed(struct ffs_data *ffs);
184
185/* Misc helper functions ****************************************************/
186
187static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
188	__attribute__((warn_unused_result, nonnull));
189static char *ffs_prepare_buffer(const char __user *buf, size_t len)
190	__attribute__((warn_unused_result, nonnull));
191
192
193/* Control file aka ep0 *****************************************************/
194
195static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
196{
197	struct ffs_data *ffs = req->context;
198
199	complete_all(&ffs->ep0req_completion);
200}
201
202static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
203{
204	struct usb_request *req = ffs->ep0req;
205	int ret;
206
207	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
208
209	spin_unlock_irq(&ffs->ev.waitq.lock);
210
211	req->buf      = data;
212	req->length   = len;
213
	/*
	 * The UDC layer requires us to provide a buffer even for a ZLP,
	 * but it should never actually use it.  Pass a poisoned pointer
	 * to catch a possible bug in the driver.
	 */
219	if (req->buf == NULL)
220		req->buf = (void *)0xDEADBABE;
221
222	reinit_completion(&ffs->ep0req_completion);
223
224	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
225	if (unlikely(ret < 0))
226		return ret;
227
228	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
229	if (unlikely(ret)) {
230		usb_ep_dequeue(ffs->gadget->ep0, req);
231		return -EINTR;
232	}
233
234	ffs->setup_state = FFS_NO_SETUP;
235	return req->status ? req->status : req->actual;
236}
237
238static int __ffs_ep0_stall(struct ffs_data *ffs)
239{
240	if (ffs->ev.can_stall) {
241		pr_vdebug("ep0 stall\n");
242		usb_ep_set_halt(ffs->gadget->ep0);
243		ffs->setup_state = FFS_NO_SETUP;
244		return -EL2HLT;
245	} else {
246		pr_debug("bogus ep0 stall!\n");
247		return -ESRCH;
248	}
249}
250
251static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
252			     size_t len, loff_t *ptr)
253{
254	struct ffs_data *ffs = file->private_data;
255	ssize_t ret;
256	char *data;
257
258	ENTER();
259
260	/* Fast check if setup was canceled */
261	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
262		return -EIDRM;
263
264	/* Acquire mutex */
265	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
266	if (unlikely(ret < 0))
267		return ret;
268
269	/* Check state */
270	switch (ffs->state) {
271	case FFS_READ_DESCRIPTORS:
272	case FFS_READ_STRINGS:
273		/* Copy data */
274		if (unlikely(len < 16)) {
275			ret = -EINVAL;
276			break;
277		}
278
279		data = ffs_prepare_buffer(buf, len);
280		if (IS_ERR(data)) {
281			ret = PTR_ERR(data);
282			break;
283		}
284
285		/* Handle data */
286		if (ffs->state == FFS_READ_DESCRIPTORS) {
287			pr_info("read descriptors\n");
288			ret = __ffs_data_got_descs(ffs, data, len);
289			if (unlikely(ret < 0))
290				break;
291
292			ffs->state = FFS_READ_STRINGS;
293			ret = len;
294		} else {
295			pr_info("read strings\n");
296			ret = __ffs_data_got_strings(ffs, data, len);
297			if (unlikely(ret < 0))
298				break;
299
300			ret = ffs_epfiles_create(ffs);
301			if (unlikely(ret)) {
302				ffs->state = FFS_CLOSING;
303				break;
304			}
305
306			ffs->state = FFS_ACTIVE;
307			mutex_unlock(&ffs->mutex);
308
309			ret = ffs_ready(ffs);
310			if (unlikely(ret < 0)) {
311				ffs->state = FFS_CLOSING;
312				return ret;
313			}
314
315			set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
316			return len;
317		}
318		break;
319
320	case FFS_ACTIVE:
321		data = NULL;
322		/*
		 * We're called from user space, so we can use _irq
		 * rather than _irqsave.
325		 */
326		spin_lock_irq(&ffs->ev.waitq.lock);
327		switch (ffs_setup_state_clear_cancelled(ffs)) {
328		case FFS_SETUP_CANCELLED:
329			ret = -EIDRM;
330			goto done_spin;
331
332		case FFS_NO_SETUP:
333			ret = -ESRCH;
334			goto done_spin;
335
336		case FFS_SETUP_PENDING:
337			break;
338		}
339
340		/* FFS_SETUP_PENDING */
341		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
342			spin_unlock_irq(&ffs->ev.waitq.lock);
343			ret = __ffs_ep0_stall(ffs);
344			break;
345		}
346
347		/* FFS_SETUP_PENDING and not stall */
348		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
349
350		spin_unlock_irq(&ffs->ev.waitq.lock);
351
352		data = ffs_prepare_buffer(buf, len);
353		if (IS_ERR(data)) {
354			ret = PTR_ERR(data);
355			break;
356		}
357
358		spin_lock_irq(&ffs->ev.waitq.lock);
359
360		/*
361		 * We are guaranteed to be still in FFS_ACTIVE state
362		 * but the state of setup could have changed from
363		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
364		 * to check for that.  If that happened we copied data
365		 * from user space in vain but it's unlikely.
366		 *
367		 * For sure we are not in FFS_NO_SETUP since this is
368		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
369		 * transition can be performed and it's protected by
370		 * mutex.
371		 */
372		if (ffs_setup_state_clear_cancelled(ffs) ==
373		    FFS_SETUP_CANCELLED) {
374			ret = -EIDRM;
375done_spin:
376			spin_unlock_irq(&ffs->ev.waitq.lock);
377		} else {
378			/* unlocks spinlock */
379			ret = __ffs_ep0_queue_wait(ffs, data, len);
380		}
381		kfree(data);
382		break;
383
384	default:
385		ret = -EBADFD;
386		break;
387	}
388
389	mutex_unlock(&ffs->mutex);
390	return ret;
391}
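
/*
 * Purely illustrative sketch (not part of the driver, path name
 * hypothetical, error handling omitted): the write handler above
 * implements the FunctionFS bring-up protocol.  Userspace writes the
 * descriptors blob and then the strings blob into ep0; after that the
 * instance is FFS_ACTIVE and the epN files appear:
 *
 *	int ep0 = open("/dev/usb-ffs/my_func/ep0", O_RDWR);
 *
 *	write(ep0, descs, descs_len);	// consumed in FFS_READ_DESCRIPTORS
 *	write(ep0, strs, strs_len);	// consumed in FFS_READ_STRINGS
 *
 * See __ffs_data_got_descs() and __ffs_data_got_strings() below for
 * the expected blob layouts.
 */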
392
393static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
394				     size_t n)
395{
396	/*
397	 * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
398	 * to release them.
399	 */
400	struct usb_functionfs_event events[n];
401	unsigned i = 0;
402
403	memset(events, 0, sizeof events);
404
405	do {
406		events[i].type = ffs->ev.types[i];
407		if (events[i].type == FUNCTIONFS_SETUP) {
408			events[i].u.setup = ffs->ev.setup;
409			ffs->setup_state = FFS_SETUP_PENDING;
410		}
411	} while (++i < n);
412
413	if (n < ffs->ev.count) {
414		ffs->ev.count -= n;
415		memmove(ffs->ev.types, ffs->ev.types + n,
416			ffs->ev.count * sizeof *ffs->ev.types);
417	} else {
418		ffs->ev.count = 0;
419	}
420
421	spin_unlock_irq(&ffs->ev.waitq.lock);
422	mutex_unlock(&ffs->mutex);
423
424	return unlikely(__copy_to_user(buf, events, sizeof events))
425		? -EFAULT : sizeof events;
426}
427
428static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
429			    size_t len, loff_t *ptr)
430{
431	struct ffs_data *ffs = file->private_data;
432	char *data = NULL;
433	size_t n;
434	int ret;
435
436	ENTER();
437
438	/* Fast check if setup was canceled */
439	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
440		return -EIDRM;
441
442	/* Acquire mutex */
443	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
444	if (unlikely(ret < 0))
445		return ret;
446
447	/* Check state */
448	if (ffs->state != FFS_ACTIVE) {
449		ret = -EBADFD;
450		goto done_mutex;
451	}
452
453	/*
	 * We're called from user space, so we can use _irq rather than
	 * _irqsave.
456	 */
457	spin_lock_irq(&ffs->ev.waitq.lock);
458
459	switch (ffs_setup_state_clear_cancelled(ffs)) {
460	case FFS_SETUP_CANCELLED:
461		ret = -EIDRM;
462		break;
463
464	case FFS_NO_SETUP:
465		n = len / sizeof(struct usb_functionfs_event);
466		if (unlikely(!n)) {
467			ret = -EINVAL;
468			break;
469		}
470
471		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
472			ret = -EAGAIN;
473			break;
474		}
475
476		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
477							ffs->ev.count)) {
478			ret = -EINTR;
479			break;
480		}
481
482		return __ffs_ep0_read_events(ffs, buf,
483					     min(n, (size_t)ffs->ev.count));
484
485	case FFS_SETUP_PENDING:
486		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
487			spin_unlock_irq(&ffs->ev.waitq.lock);
488			ret = __ffs_ep0_stall(ffs);
489			goto done_mutex;
490		}
491
492		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
493
494		spin_unlock_irq(&ffs->ev.waitq.lock);
495
496		if (likely(len)) {
497			data = kmalloc(len, GFP_KERNEL);
498			if (unlikely(!data)) {
499				ret = -ENOMEM;
500				goto done_mutex;
501			}
502		}
503
504		spin_lock_irq(&ffs->ev.waitq.lock);
505
506		/* See ffs_ep0_write() */
507		if (ffs_setup_state_clear_cancelled(ffs) ==
508		    FFS_SETUP_CANCELLED) {
509			ret = -EIDRM;
510			break;
511		}
512
513		/* unlocks spinlock */
514		ret = __ffs_ep0_queue_wait(ffs, data, len);
515		if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
516			ret = -EFAULT;
517		goto done_mutex;
518
519	default:
520		ret = -EBADFD;
521		break;
522	}
523
524	spin_unlock_irq(&ffs->ev.waitq.lock);
525done_mutex:
526	mutex_unlock(&ffs->mutex);
527	kfree(data);
528	return ret;
529}
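
/*
 * Purely illustrative sketch (not part of the driver, error handling
 * omitted): once the function is active, the read handler above is
 * used to consume queued struct usb_functionfs_event records and to
 * service the data stage of a pending setup request:
 *
 *	struct usb_functionfs_event event;
 *
 *	read(ep0, &event, sizeof(event));	// FFS_NO_SETUP: dequeue event
 *	if (event.type == FUNCTIONFS_SETUP) {
 *		// state is now FFS_SETUP_PENDING; a follow-up read() or
 *		// write() on ep0 handles the control transfer's data stage
 *	}
 *
 * Events that do not fit into the read are kept for the next call.
 */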
530
531static int ffs_ep0_open(struct inode *inode, struct file *file)
532{
533	struct ffs_data *ffs = inode->i_private;
534
535	ENTER();
536
537	if (unlikely(ffs->state == FFS_CLOSING))
538		return -EBUSY;
539
540	file->private_data = ffs;
541	ffs_data_opened(ffs);
542
543	return 0;
544}
545
546static int ffs_ep0_release(struct inode *inode, struct file *file)
547{
548	struct ffs_data *ffs = file->private_data;
549
550	ENTER();
551
552	ffs_data_closed(ffs);
553
554	return 0;
555}
556
557static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
558{
559	struct ffs_data *ffs = file->private_data;
560	struct usb_gadget *gadget = ffs->gadget;
561	long ret;
562
563	ENTER();
564
565	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
566		struct ffs_function *func = ffs->func;
567		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
568	} else if (gadget && gadget->ops->ioctl) {
569		ret = gadget->ops->ioctl(gadget, code, value);
570	} else {
571		ret = -ENOTTY;
572	}
573
574	return ret;
575}
576
577static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
578{
579	struct ffs_data *ffs = file->private_data;
580	unsigned int mask = POLLWRNORM;
581	int ret;
582
583	poll_wait(file, &ffs->ev.waitq, wait);
584
585	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
586	if (unlikely(ret < 0))
587		return mask;
588
589	switch (ffs->state) {
590	case FFS_READ_DESCRIPTORS:
591	case FFS_READ_STRINGS:
592		mask |= POLLOUT;
593		break;
594
595	case FFS_ACTIVE:
596		switch (ffs->setup_state) {
597		case FFS_NO_SETUP:
598			if (ffs->ev.count)
599				mask |= POLLIN;
600			break;
601
602		case FFS_SETUP_PENDING:
603		case FFS_SETUP_CANCELLED:
604			mask |= (POLLIN | POLLOUT);
605			break;
		}
		/* fall through */
	case FFS_CLOSING:
608		break;
609	}
610
611	mutex_unlock(&ffs->mutex);
612
613	return mask;
614}
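
/*
 * Purely illustrative sketch (not part of the driver): a typical event
 * loop polls ep0 before reading, relying on the poll mask computed
 * above (POLLIN: events queued or setup pending, POLLOUT: ep0 ready
 * for the next write):
 *
 *	struct pollfd pfd = { .fd = ep0, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(ep0, &event, sizeof(event));
 */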
615
616static const struct file_operations ffs_ep0_operations = {
617	.llseek =	no_llseek,
618
619	.open =		ffs_ep0_open,
620	.write =	ffs_ep0_write,
621	.read =		ffs_ep0_read,
622	.release =	ffs_ep0_release,
623	.unlocked_ioctl =	ffs_ep0_ioctl,
624	.poll =		ffs_ep0_poll,
625};
626
627
628/* "Normal" endpoints operations ********************************************/
629
630static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
631{
632	ENTER();
633	if (likely(req->context)) {
634		struct ffs_ep *ep = _ep->driver_data;
635		ep->status = req->status ? req->status : req->actual;
636		complete(req->context);
637	}
638}
639
640static void ffs_user_copy_worker(struct work_struct *work)
641{
642	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
643						   work);
644	int ret = io_data->req->status ? io_data->req->status :
645					 io_data->req->actual;
646
647	if (io_data->read && ret > 0) {
648		int i;
649		size_t pos = 0;
650
651		/*
652		 * Since req->length may be bigger than io_data->len (after
653		 * being rounded up to maxpacketsize), we may end up with more
		 * data than user space has space for.
655		 */
656		ret = min_t(int, ret, io_data->len);
657
658		use_mm(io_data->mm);
659		for (i = 0; i < io_data->nr_segs; i++) {
660			size_t len = min_t(size_t, ret - pos,
661					io_data->iovec[i].iov_len);
662			if (!len)
663				break;
664			if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
665						 &io_data->buf[pos], len))) {
666				ret = -EFAULT;
667				break;
668			}
669			pos += len;
670		}
671		unuse_mm(io_data->mm);
672	}
673
674	aio_complete(io_data->kiocb, ret, ret);
675
676	usb_ep_free_request(io_data->ep, io_data->req);
677
678	io_data->kiocb->private = NULL;
679	if (io_data->read)
680		kfree(io_data->iovec);
681	kfree(io_data->buf);
682	kfree(io_data);
683}
684
685static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
686					 struct usb_request *req)
687{
688	struct ffs_io_data *io_data = req->context;
689
690	ENTER();
691
692	INIT_WORK(&io_data->work, ffs_user_copy_worker);
693	schedule_work(&io_data->work);
694}
695
696static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
697{
698	struct ffs_epfile *epfile = file->private_data;
699	struct ffs_ep *ep;
700	char *data = NULL;
701	ssize_t ret, data_len = -EINVAL;
702	int halt;
703
704	/* Are we still active? */
705	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
706		ret = -ENODEV;
707		goto error;
708	}
709
710	/* Wait for endpoint to be enabled */
711	ep = epfile->ep;
712	if (!ep) {
713		if (file->f_flags & O_NONBLOCK) {
714			ret = -EAGAIN;
715			goto error;
716		}
717
718		ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
719		if (ret) {
720			ret = -EINTR;
721			goto error;
722		}
723	}
724
725	/* Do we halt? */
726	halt = (!io_data->read == !epfile->in);
727	if (halt && epfile->isoc) {
728		ret = -EINVAL;
729		goto error;
730	}
731
732	/* Allocate & copy */
733	if (!halt) {
734		/*
		 * If we _do_ wait above, epfile->ffs->gadget may be NULL
		 * while we sleep, so do not read it into 'gadget' earlier.
737		 */
738		struct usb_gadget *gadget = epfile->ffs->gadget;
739
740		spin_lock_irq(&epfile->ffs->eps_lock);
741		/* In the meantime, endpoint got disabled or changed. */
742		if (epfile->ep != ep) {
743			spin_unlock_irq(&epfile->ffs->eps_lock);
744			return -ESHUTDOWN;
745		}
746		/*
747		 * Controller may require buffer size to be aligned to
748		 * maxpacketsize of an out endpoint.
749		 */
750		data_len = io_data->read ?
751			   usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
752			   io_data->len;
753		spin_unlock_irq(&epfile->ffs->eps_lock);
754
755		data = kmalloc(data_len, GFP_KERNEL);
756		if (unlikely(!data))
757			return -ENOMEM;
758		if (io_data->aio && !io_data->read) {
759			int i;
760			size_t pos = 0;
761			for (i = 0; i < io_data->nr_segs; i++) {
762				if (unlikely(copy_from_user(&data[pos],
763					     io_data->iovec[i].iov_base,
764					     io_data->iovec[i].iov_len))) {
765					ret = -EFAULT;
766					goto error;
767				}
768				pos += io_data->iovec[i].iov_len;
769			}
770		} else {
771			if (!io_data->read &&
772			    unlikely(__copy_from_user(data, io_data->buf,
773						      io_data->len))) {
774				ret = -EFAULT;
775				goto error;
776			}
777		}
778	}
779
780	/* We will be using request */
781	ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
782	if (unlikely(ret))
783		goto error;
784
785	spin_lock_irq(&epfile->ffs->eps_lock);
786
787	if (epfile->ep != ep) {
788		/* In the meantime, endpoint got disabled or changed. */
789		ret = -ESHUTDOWN;
790		spin_unlock_irq(&epfile->ffs->eps_lock);
791	} else if (halt) {
792		/* Halt */
793		if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
794			usb_ep_set_halt(ep->ep);
795		spin_unlock_irq(&epfile->ffs->eps_lock);
796		ret = -EBADMSG;
797	} else {
798		/* Fire the request */
799		struct usb_request *req;
800
		/*
		 * Sanity check: even though data_len cannot be used
		 * uninitialized as the code stands today, some compilers
		 * complain about this situation anyway.
		 * To keep the build free of warnings, data_len is
		 * initialized to -EINVAL at its declaration, which in turn
		 * means we can no longer rely on the compiler to warn us
		 * should a future change use data_len uninitialized.
		 * For that reason we add this redundant runtime check
		 * here.
		 */
812		if (unlikely(data_len == -EINVAL)) {
813			WARN(1, "%s: data_len == -EINVAL\n", __func__);
814			ret = -EINVAL;
815			goto error_lock;
816		}
817
818		if (io_data->aio) {
			/* we hold eps_lock here, so no sleeping allocation */
			req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
			if (unlikely(!req)) {
				ret = -ENOMEM;
				goto error_lock;
			}
822
823			req->buf      = data;
824			req->length   = data_len;
825
826			io_data->buf = data;
827			io_data->ep = ep->ep;
828			io_data->req = req;
829
830			req->context  = io_data;
831			req->complete = ffs_epfile_async_io_complete;
832
833			ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
834			if (unlikely(ret)) {
835				usb_ep_free_request(ep->ep, req);
836				goto error_lock;
837			}
838			ret = -EIOCBQUEUED;
839
840			spin_unlock_irq(&epfile->ffs->eps_lock);
841		} else {
842			DECLARE_COMPLETION_ONSTACK(done);
843
844			req = ep->req;
845			req->buf      = data;
846			req->length   = data_len;
847
848			req->context  = &done;
849			req->complete = ffs_epfile_io_complete;
850
851			ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
852
853			spin_unlock_irq(&epfile->ffs->eps_lock);
854
855			if (unlikely(ret < 0)) {
856				/* nop */
857			} else if (unlikely(
858				   wait_for_completion_interruptible(&done))) {
859				ret = -EINTR;
860				usb_ep_dequeue(ep->ep, req);
861			} else {
862				/*
				 * XXX We may end up silently dropping data
				 * here.  Since data_len (i.e. req->length) may
				 * be bigger than len (after being rounded up
				 * to maxpacketsize), we may end up with more
				 * data than user space has space for.
868				 */
869				ret = ep->status;
870				if (io_data->read && ret > 0) {
871					ret = min_t(size_t, ret, io_data->len);
872
873					if (unlikely(copy_to_user(io_data->buf,
874						data, ret)))
875						ret = -EFAULT;
876				}
877			}
878			kfree(data);
879		}
880	}
881
882	mutex_unlock(&epfile->mutex);
883	return ret;
884
885error_lock:
886	spin_unlock_irq(&epfile->ffs->eps_lock);
887	mutex_unlock(&epfile->mutex);
888error:
889	kfree(data);
890	return ret;
891}
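
/*
 * Purely illustrative sketch (not part of the driver, fd names
 * hypothetical): from userspace each epN file is driven with plain
 * read()/write() matching the direction declared in the descriptors;
 * the call blocks until the request completes or the endpoint goes
 * away:
 *
 *	n = write(ep_in_fd, buf, len);	// IN endpoint: device -> host
 *	n = read(ep_out_fd, buf, len);	// OUT endpoint: host -> device
 *
 * Using the wrong direction halts the endpoint and returns -EBADMSG
 * (see the halt handling above); isochronous endpoints return -EINVAL
 * instead.
 */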
892
893static ssize_t
894ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
895		 loff_t *ptr)
896{
897	struct ffs_io_data io_data;
898
899	ENTER();
900
901	io_data.aio = false;
902	io_data.read = false;
	io_data.buf = (char __user *)buf;
904	io_data.len = len;
905
906	return ffs_epfile_io(file, &io_data);
907}
908
909static ssize_t
910ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
911{
912	struct ffs_io_data io_data;
913
914	ENTER();
915
916	io_data.aio = false;
917	io_data.read = true;
918	io_data.buf = buf;
919	io_data.len = len;
920
921	return ffs_epfile_io(file, &io_data);
922}
923
924static int
925ffs_epfile_open(struct inode *inode, struct file *file)
926{
927	struct ffs_epfile *epfile = inode->i_private;
928
929	ENTER();
930
931	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
932		return -ENODEV;
933
934	file->private_data = epfile;
935	ffs_data_opened(epfile->ffs);
936
937	return 0;
938}
939
940static int ffs_aio_cancel(struct kiocb *kiocb)
941{
942	struct ffs_io_data *io_data = kiocb->private;
943	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
944	int value;
945
946	ENTER();
947
948	spin_lock_irq(&epfile->ffs->eps_lock);
949
950	if (likely(io_data && io_data->ep && io_data->req))
951		value = usb_ep_dequeue(io_data->ep, io_data->req);
952	else
953		value = -EINVAL;
954
955	spin_unlock_irq(&epfile->ffs->eps_lock);
956
957	return value;
958}
959
960static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
961				    const struct iovec *iovec,
962				    unsigned long nr_segs, loff_t loff)
963{
964	struct ffs_io_data *io_data;
965
966	ENTER();
967
968	io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
969	if (unlikely(!io_data))
970		return -ENOMEM;
971
972	io_data->aio = true;
973	io_data->read = false;
974	io_data->kiocb = kiocb;
975	io_data->iovec = iovec;
976	io_data->nr_segs = nr_segs;
977	io_data->len = kiocb->ki_nbytes;
978	io_data->mm = current->mm;
979
980	kiocb->private = io_data;
981
982	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
983
984	return ffs_epfile_io(kiocb->ki_filp, io_data);
985}
986
987static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
988				   const struct iovec *iovec,
989				   unsigned long nr_segs, loff_t loff)
990{
991	struct ffs_io_data *io_data;
992	struct iovec *iovec_copy;
993
994	ENTER();
995
996	iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
997	if (unlikely(!iovec_copy))
998		return -ENOMEM;
999
1000	memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
1001
1002	io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
1003	if (unlikely(!io_data)) {
1004		kfree(iovec_copy);
1005		return -ENOMEM;
1006	}
1007
1008	io_data->aio = true;
1009	io_data->read = true;
1010	io_data->kiocb = kiocb;
1011	io_data->iovec = iovec_copy;
1012	io_data->nr_segs = nr_segs;
1013	io_data->len = kiocb->ki_nbytes;
1014	io_data->mm = current->mm;
1015
1016	kiocb->private = io_data;
1017
1018	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
1019
1020	return ffs_epfile_io(kiocb->ki_filp, io_data);
1021}
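
/*
 * Purely illustrative sketch (not part of the driver): the aio hooks
 * above let userspace queue endpoint transfers asynchronously, for
 * example through libaio (io_setup()/ctx handling and error checks
 * omitted, fd name hypothetical):
 *
 *	struct iocb iocb, *iocbp = &iocb;
 *	struct io_event ev;
 *
 *	io_prep_pwrite(&iocb, ep_in_fd, buf, len, 0);
 *	io_submit(ctx, 1, &iocbp);
 *	io_getevents(ctx, 1, 1, &ev, NULL);
 *
 * Completion is signalled from ffs_user_copy_worker() via
 * aio_complete() once the USB request finishes.
 */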
1022
1023static int
1024ffs_epfile_release(struct inode *inode, struct file *file)
1025{
1026	struct ffs_epfile *epfile = inode->i_private;
1027
1028	ENTER();
1029
1030	ffs_data_closed(epfile->ffs);
1031
1032	return 0;
1033}
1034
1035static long ffs_epfile_ioctl(struct file *file, unsigned code,
1036			     unsigned long value)
1037{
1038	struct ffs_epfile *epfile = file->private_data;
1039	int ret;
1040
1041	ENTER();
1042
1043	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1044		return -ENODEV;
1045
1046	spin_lock_irq(&epfile->ffs->eps_lock);
1047	if (likely(epfile->ep)) {
1048		switch (code) {
1049		case FUNCTIONFS_FIFO_STATUS:
1050			ret = usb_ep_fifo_status(epfile->ep->ep);
1051			break;
1052		case FUNCTIONFS_FIFO_FLUSH:
1053			usb_ep_fifo_flush(epfile->ep->ep);
1054			ret = 0;
1055			break;
1056		case FUNCTIONFS_CLEAR_HALT:
1057			ret = usb_ep_clear_halt(epfile->ep->ep);
1058			break;
1059		case FUNCTIONFS_ENDPOINT_REVMAP:
1060			ret = epfile->ep->num;
1061			break;
1062		case FUNCTIONFS_ENDPOINT_DESC:
1063		{
1064			int desc_idx;
1065			struct usb_endpoint_descriptor *desc;
1066
1067			switch (epfile->ffs->gadget->speed) {
1068			case USB_SPEED_SUPER:
1069				desc_idx = 2;
1070				break;
1071			case USB_SPEED_HIGH:
1072				desc_idx = 1;
1073				break;
1074			default:
1075				desc_idx = 0;
1076			}
1077			desc = epfile->ep->descs[desc_idx];
1078
1079			spin_unlock_irq(&epfile->ffs->eps_lock);
1080			ret = copy_to_user((void *)value, desc, sizeof(*desc));
1081			if (ret)
1082				ret = -EFAULT;
1083			return ret;
1084		}
1085		default:
1086			ret = -ENOTTY;
1087		}
1088	} else {
1089		ret = -ENODEV;
1090	}
1091	spin_unlock_irq(&epfile->ffs->eps_lock);
1092
1093	return ret;
1094}
1095
1096static const struct file_operations ffs_epfile_operations = {
1097	.llseek =	no_llseek,
1098
1099	.open =		ffs_epfile_open,
1100	.write =	ffs_epfile_write,
1101	.read =		ffs_epfile_read,
1102	.aio_write =	ffs_epfile_aio_write,
1103	.aio_read =	ffs_epfile_aio_read,
1104	.release =	ffs_epfile_release,
1105	.unlocked_ioctl =	ffs_epfile_ioctl,
1106};
1107
1108
1109/* File system and super block operations ***********************************/
1110
1111/*
1112 * Mounting the file system creates a controller file, used first for
1113 * function configuration then later for event monitoring.
1114 */
1115
1116static struct inode *__must_check
1117ffs_sb_make_inode(struct super_block *sb, void *data,
1118		  const struct file_operations *fops,
1119		  const struct inode_operations *iops,
1120		  struct ffs_file_perms *perms)
1121{
1122	struct inode *inode;
1123
1124	ENTER();
1125
1126	inode = new_inode(sb);
1127
1128	if (likely(inode)) {
1129		struct timespec current_time = CURRENT_TIME;
1130
1131		inode->i_ino	 = get_next_ino();
1132		inode->i_mode    = perms->mode;
1133		inode->i_uid     = perms->uid;
1134		inode->i_gid     = perms->gid;
1135		inode->i_atime   = current_time;
1136		inode->i_mtime   = current_time;
1137		inode->i_ctime   = current_time;
1138		inode->i_private = data;
1139		if (fops)
1140			inode->i_fop = fops;
1141		if (iops)
1142			inode->i_op  = iops;
1143	}
1144
1145	return inode;
1146}
1147
1148/* Create "regular" file */
1149static struct dentry *ffs_sb_create_file(struct super_block *sb,
1150					const char *name, void *data,
1151					const struct file_operations *fops)
1152{
1153	struct ffs_data	*ffs = sb->s_fs_info;
1154	struct dentry	*dentry;
1155	struct inode	*inode;
1156
1157	ENTER();
1158
1159	dentry = d_alloc_name(sb->s_root, name);
1160	if (unlikely(!dentry))
1161		return NULL;
1162
1163	inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1164	if (unlikely(!inode)) {
1165		dput(dentry);
1166		return NULL;
1167	}
1168
1169	d_add(dentry, inode);
1170	return dentry;
1171}
1172
1173/* Super block */
1174static const struct super_operations ffs_sb_operations = {
1175	.statfs =	simple_statfs,
1176	.drop_inode =	generic_delete_inode,
1177};
1178
1179struct ffs_sb_fill_data {
1180	struct ffs_file_perms perms;
1181	umode_t root_mode;
1182	const char *dev_name;
1183	struct ffs_data *ffs_data;
1184};
1185
1186static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1187{
1188	struct ffs_sb_fill_data *data = _data;
1189	struct inode	*inode;
1190	struct ffs_data	*ffs = data->ffs_data;
1191
1192	ENTER();
1193
1194	ffs->sb              = sb;
1195	data->ffs_data       = NULL;
1196	sb->s_fs_info        = ffs;
1197	sb->s_blocksize      = PAGE_CACHE_SIZE;
1198	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1199	sb->s_magic          = FUNCTIONFS_MAGIC;
1200	sb->s_op             = &ffs_sb_operations;
1201	sb->s_time_gran      = 1;
1202
1203	/* Root inode */
1204	data->perms.mode = data->root_mode;
1205	inode = ffs_sb_make_inode(sb, NULL,
1206				  &simple_dir_operations,
1207				  &simple_dir_inode_operations,
1208				  &data->perms);
1209	sb->s_root = d_make_root(inode);
1210	if (unlikely(!sb->s_root))
1211		return -ENOMEM;
1212
1213	/* EP0 file */
1214	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1215					 &ffs_ep0_operations)))
1216		return -ENOMEM;
1217
1218	return 0;
1219}
1220
1221static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1222{
1223	ENTER();
1224
1225	if (!opts || !*opts)
1226		return 0;
1227
1228	for (;;) {
1229		unsigned long value;
1230		char *eq, *comma;
1231
1232		/* Option limit */
1233		comma = strchr(opts, ',');
1234		if (comma)
1235			*comma = 0;
1236
1237		/* Value limit */
1238		eq = strchr(opts, '=');
1239		if (unlikely(!eq)) {
1240			pr_err("'=' missing in %s\n", opts);
1241			return -EINVAL;
1242		}
1243		*eq = 0;
1244
1245		/* Parse value */
1246		if (kstrtoul(eq + 1, 0, &value)) {
1247			pr_err("%s: invalid value: %s\n", opts, eq + 1);
1248			return -EINVAL;
1249		}
1250
1251		/* Interpret option */
1252		switch (eq - opts) {
1253		case 5:
1254			if (!memcmp(opts, "rmode", 5))
1255				data->root_mode  = (value & 0555) | S_IFDIR;
1256			else if (!memcmp(opts, "fmode", 5))
1257				data->perms.mode = (value & 0666) | S_IFREG;
1258			else
1259				goto invalid;
1260			break;
1261
1262		case 4:
1263			if (!memcmp(opts, "mode", 4)) {
1264				data->root_mode  = (value & 0555) | S_IFDIR;
1265				data->perms.mode = (value & 0666) | S_IFREG;
1266			} else {
1267				goto invalid;
1268			}
1269			break;
1270
1271		case 3:
1272			if (!memcmp(opts, "uid", 3)) {
1273				data->perms.uid = make_kuid(current_user_ns(), value);
1274				if (!uid_valid(data->perms.uid)) {
1275					pr_err("%s: unmapped value: %lu\n", opts, value);
1276					return -EINVAL;
1277				}
1278			} else if (!memcmp(opts, "gid", 3)) {
1279				data->perms.gid = make_kgid(current_user_ns(), value);
1280				if (!gid_valid(data->perms.gid)) {
1281					pr_err("%s: unmapped value: %lu\n", opts, value);
1282					return -EINVAL;
1283				}
1284			} else {
1285				goto invalid;
1286			}
1287			break;
1288
1289		default:
1290invalid:
1291			pr_err("%s: invalid option\n", opts);
1292			return -EINVAL;
1293		}
1294
1295		/* Next iteration */
1296		if (!comma)
1297			break;
1298		opts = comma + 1;
1299	}
1300
1301	return 0;
1302}
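
/*
 * Purely illustrative example of how the options parsed above are used
 * (device and mount point names hypothetical):
 *
 *	mount -t functionfs -o mode=0660,uid=1000,gid=1000 \
 *		my_func /dev/usb-ffs/my_func
 *
 * "my_func" must match the name of a registered ffs_dev (see
 * ffs_acquire_dev()); "mode" sets both the root directory and the
 * endpoint files, while "rmode" and "fmode" set them individually.
 */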
1303
1304/* "mount -t functionfs dev_name /dev/function" ends up here */
1305
1306static struct dentry *
1307ffs_fs_mount(struct file_system_type *t, int flags,
1308	      const char *dev_name, void *opts)
1309{
1310	struct ffs_sb_fill_data data = {
1311		.perms = {
1312			.mode = S_IFREG | 0600,
1313			.uid = GLOBAL_ROOT_UID,
1314			.gid = GLOBAL_ROOT_GID,
1315		},
1316		.root_mode = S_IFDIR | 0500,
1317	};
1318	struct dentry *rv;
1319	int ret;
1320	void *ffs_dev;
1321	struct ffs_data	*ffs;
1322
1323	ENTER();
1324
1325	ret = ffs_fs_parse_opts(&data, opts);
1326	if (unlikely(ret < 0))
1327		return ERR_PTR(ret);
1328
1329	ffs = ffs_data_new();
1330	if (unlikely(!ffs))
1331		return ERR_PTR(-ENOMEM);
1332	ffs->file_perms = data.perms;
1333
1334	ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1335	if (unlikely(!ffs->dev_name)) {
1336		ffs_data_put(ffs);
1337		return ERR_PTR(-ENOMEM);
1338	}
1339
1340	ffs_dev = ffs_acquire_dev(dev_name);
1341	if (IS_ERR(ffs_dev)) {
1342		ffs_data_put(ffs);
1343		return ERR_CAST(ffs_dev);
1344	}
1345	ffs->private_data = ffs_dev;
1346	data.ffs_data = ffs;
1347
1348	rv = mount_nodev(t, flags, &data, ffs_sb_fill);
1349	if (IS_ERR(rv) && data.ffs_data) {
1350		ffs_release_dev(data.ffs_data);
1351		ffs_data_put(data.ffs_data);
1352	}
1353	return rv;
1354}
1355
1356static void
1357ffs_fs_kill_sb(struct super_block *sb)
1358{
1359	ENTER();
1360
1361	kill_litter_super(sb);
1362	if (sb->s_fs_info) {
1363		ffs_release_dev(sb->s_fs_info);
1364		ffs_data_put(sb->s_fs_info);
1365	}
1366}
1367
1368static struct file_system_type ffs_fs_type = {
1369	.owner		= THIS_MODULE,
1370	.name		= "functionfs",
1371	.mount		= ffs_fs_mount,
1372	.kill_sb	= ffs_fs_kill_sb,
1373};
1374MODULE_ALIAS_FS("functionfs");
1375
1376
1377/* Driver's main init/cleanup functions *************************************/
1378
1379static int functionfs_init(void)
1380{
1381	int ret;
1382
1383	ENTER();
1384
1385	ret = register_filesystem(&ffs_fs_type);
1386	if (likely(!ret))
1387		pr_info("file system registered\n");
1388	else
1389		pr_err("failed registering file system (%d)\n", ret);
1390
1391	return ret;
1392}
1393
1394static void functionfs_cleanup(void)
1395{
1396	ENTER();
1397
1398	pr_info("unloading\n");
1399	unregister_filesystem(&ffs_fs_type);
1400}
1401
1402
1403/* ffs_data and ffs_function construction and destruction code **************/
1404
1405static void ffs_data_clear(struct ffs_data *ffs);
1406static void ffs_data_reset(struct ffs_data *ffs);
1407
1408static void ffs_data_get(struct ffs_data *ffs)
1409{
1410	ENTER();
1411
1412	atomic_inc(&ffs->ref);
1413}
1414
1415static void ffs_data_opened(struct ffs_data *ffs)
1416{
1417	ENTER();
1418
1419	atomic_inc(&ffs->ref);
1420	atomic_inc(&ffs->opened);
1421}
1422
1423static void ffs_data_put(struct ffs_data *ffs)
1424{
1425	ENTER();
1426
1427	if (unlikely(atomic_dec_and_test(&ffs->ref))) {
1428		pr_info("%s(): freeing\n", __func__);
1429		ffs_data_clear(ffs);
1430		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
1431		       waitqueue_active(&ffs->ep0req_completion.wait));
1432		kfree(ffs->dev_name);
1433		kfree(ffs);
1434	}
1435}
1436
1437static void ffs_data_closed(struct ffs_data *ffs)
1438{
1439	ENTER();
1440
1441	if (atomic_dec_and_test(&ffs->opened)) {
1442		ffs->state = FFS_CLOSING;
1443		ffs_data_reset(ffs);
1444	}
1445
1446	ffs_data_put(ffs);
1447}
1448
1449static struct ffs_data *ffs_data_new(void)
1450{
1451	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1452	if (unlikely(!ffs))
1453		return NULL;
1454
1455	ENTER();
1456
1457	atomic_set(&ffs->ref, 1);
1458	atomic_set(&ffs->opened, 0);
1459	ffs->state = FFS_READ_DESCRIPTORS;
1460	mutex_init(&ffs->mutex);
1461	spin_lock_init(&ffs->eps_lock);
1462	init_waitqueue_head(&ffs->ev.waitq);
1463	init_completion(&ffs->ep0req_completion);
1464
1465	/* XXX REVISIT need to update it in some places, or do we? */
1466	ffs->ev.can_stall = 1;
1467
1468	return ffs;
1469}
1470
1471static void ffs_data_clear(struct ffs_data *ffs)
1472{
1473	ENTER();
1474
1475	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
1476		ffs_closed(ffs);
1477
1478	BUG_ON(ffs->gadget);
1479
1480	if (ffs->epfiles)
1481		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
1482
1483	kfree(ffs->raw_descs_data);
1484	kfree(ffs->raw_strings);
1485	kfree(ffs->stringtabs);
1486}
1487
1488static void ffs_data_reset(struct ffs_data *ffs)
1489{
1490	ENTER();
1491
1492	ffs_data_clear(ffs);
1493
1494	ffs->epfiles = NULL;
1495	ffs->raw_descs_data = NULL;
1496	ffs->raw_descs = NULL;
1497	ffs->raw_strings = NULL;
1498	ffs->stringtabs = NULL;
1499
1500	ffs->raw_descs_length = 0;
1501	ffs->fs_descs_count = 0;
1502	ffs->hs_descs_count = 0;
1503	ffs->ss_descs_count = 0;
1504
1505	ffs->strings_count = 0;
1506	ffs->interfaces_count = 0;
1507	ffs->eps_count = 0;
1508
1509	ffs->ev.count = 0;
1510
1511	ffs->state = FFS_READ_DESCRIPTORS;
1512	ffs->setup_state = FFS_NO_SETUP;
1513	ffs->flags = 0;
1514}
1515
1516
1517static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1518{
1519	struct usb_gadget_strings **lang;
1520	int first_id;
1521
1522	ENTER();
1523
1524	if (WARN_ON(ffs->state != FFS_ACTIVE
1525		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
1526		return -EBADFD;
1527
1528	first_id = usb_string_ids_n(cdev, ffs->strings_count);
1529	if (unlikely(first_id < 0))
1530		return first_id;
1531
1532	ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
1533	if (unlikely(!ffs->ep0req))
1534		return -ENOMEM;
1535	ffs->ep0req->complete = ffs_ep0_complete;
1536	ffs->ep0req->context = ffs;
1537
1538	lang = ffs->stringtabs;
1539	if (lang) {
1540		for (; *lang; ++lang) {
1541			struct usb_string *str = (*lang)->strings;
1542			int id = first_id;
1543			for (; str->s; ++id, ++str)
1544				str->id = id;
1545		}
1546	}
1547
1548	ffs->gadget = cdev->gadget;
1549	ffs_data_get(ffs);
1550	return 0;
1551}
1552
1553static void functionfs_unbind(struct ffs_data *ffs)
1554{
1555	ENTER();
1556
1557	if (!WARN_ON(!ffs->gadget)) {
1558		usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1559		ffs->ep0req = NULL;
1560		ffs->gadget = NULL;
1561		clear_bit(FFS_FL_BOUND, &ffs->flags);
1562		ffs_data_put(ffs);
1563	}
1564}
1565
1566static int ffs_epfiles_create(struct ffs_data *ffs)
1567{
1568	struct ffs_epfile *epfile, *epfiles;
1569	unsigned i, count;
1570
1571	ENTER();
1572
1573	count = ffs->eps_count;
1574	epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
1575	if (!epfiles)
1576		return -ENOMEM;
1577
1578	epfile = epfiles;
1579	for (i = 1; i <= count; ++i, ++epfile) {
1580		epfile->ffs = ffs;
1581		mutex_init(&epfile->mutex);
1582		init_waitqueue_head(&epfile->wait);
1583		if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
		else
			sprintf(epfile->name, "ep%u", i);
		epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
1588						 epfile,
1589						 &ffs_epfile_operations);
1590		if (unlikely(!epfile->dentry)) {
1591			ffs_epfiles_destroy(epfiles, i - 1);
1592			return -ENOMEM;
1593		}
1594	}
1595
1596	ffs->epfiles = epfiles;
1597	return 0;
1598}
1599
1600static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1601{
1602	struct ffs_epfile *epfile = epfiles;
1603
1604	ENTER();
1605
1606	for (; count; --count, ++epfile) {
1607		BUG_ON(mutex_is_locked(&epfile->mutex) ||
1608		       waitqueue_active(&epfile->wait));
1609		if (epfile->dentry) {
1610			d_delete(epfile->dentry);
1611			dput(epfile->dentry);
1612			epfile->dentry = NULL;
1613		}
1614	}
1615
1616	kfree(epfiles);
1617}
1618
1619
1620static void ffs_func_eps_disable(struct ffs_function *func)
1621{
1622	struct ffs_ep *ep         = func->eps;
1623	struct ffs_epfile *epfile = func->ffs->epfiles;
1624	unsigned count            = func->ffs->eps_count;
1625	unsigned long flags;
1626
1627	spin_lock_irqsave(&func->ffs->eps_lock, flags);
1628	do {
1629		/* pending requests get nuked */
1630		if (likely(ep->ep))
1631			usb_ep_disable(ep->ep);
1632		epfile->ep = NULL;
1633
1634		++ep;
1635		++epfile;
1636	} while (--count);
1637	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1638}
1639
1640static int ffs_func_eps_enable(struct ffs_function *func)
1641{
1642	struct ffs_data *ffs      = func->ffs;
1643	struct ffs_ep *ep         = func->eps;
1644	struct ffs_epfile *epfile = ffs->epfiles;
1645	unsigned count            = ffs->eps_count;
1646	unsigned long flags;
1647	int ret = 0;
1648
1649	spin_lock_irqsave(&func->ffs->eps_lock, flags);
1650	do {
1651		struct usb_endpoint_descriptor *ds;
1652		int desc_idx;
1653
1654		if (ffs->gadget->speed == USB_SPEED_SUPER)
1655			desc_idx = 2;
1656		else if (ffs->gadget->speed == USB_SPEED_HIGH)
1657			desc_idx = 1;
1658		else
1659			desc_idx = 0;
1660
1661		/* fall-back to lower speed if desc missing for current speed */
1662		do {
1663			ds = ep->descs[desc_idx];
1664		} while (!ds && --desc_idx >= 0);
1665
1666		if (!ds) {
1667			ret = -EINVAL;
1668			break;
1669		}
1670
1671		ep->ep->driver_data = ep;
1672		ep->ep->desc = ds;
1673		ret = usb_ep_enable(ep->ep);
1674		if (likely(!ret)) {
1675			epfile->ep = ep;
1676			epfile->in = usb_endpoint_dir_in(ds);
1677			epfile->isoc = usb_endpoint_xfer_isoc(ds);
1678		} else {
1679			break;
1680		}
1681
1682		wake_up(&epfile->wait);
1683
1684		++ep;
1685		++epfile;
1686	} while (--count);
1687	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1688
1689	return ret;
1690}
1691
1692
1693/* Parsing and building descriptors and strings *****************************/
1694
1695/*
 * This validates whether the data pointed to by data is a valid USB
 * descriptor and records how many interfaces, endpoints and strings
 * are required by the given configuration.  Returns the number of
 * bytes consumed or a negative error code if the data is invalid.
1700 */
1701
1702enum ffs_entity_type {
1703	FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1704};
1705
1706enum ffs_os_desc_type {
1707	FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
1708};
1709
1710typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
1711				   u8 *valuep,
1712				   struct usb_descriptor_header *desc,
1713				   void *priv);
1714
1715typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
1716				    struct usb_os_desc_header *h, void *data,
1717				    unsigned len, void *priv);
1718
1719static int __must_check ffs_do_single_desc(char *data, unsigned len,
1720					   ffs_entity_callback entity,
1721					   void *priv)
1722{
1723	struct usb_descriptor_header *_ds = (void *)data;
1724	u8 length;
1725	int ret;
1726
1727	ENTER();
1728
1729	/* At least two bytes are required: length and type */
1730	if (len < 2) {
1731		pr_vdebug("descriptor too short\n");
1732		return -EINVAL;
1733	}
1734
	/* Do we have at least as many bytes as the descriptor takes? */
1736	length = _ds->bLength;
1737	if (len < length) {
		pr_vdebug("descriptor longer than available data\n");
1739		return -EINVAL;
1740	}
1741
1742#define __entity_check_INTERFACE(val)  1
1743#define __entity_check_STRING(val)     (val)
1744#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
1745#define __entity(type, val) do {					\
1746		pr_vdebug("entity " #type "(%02x)\n", (val));		\
1747		if (unlikely(!__entity_check_ ##type(val))) {		\
1748			pr_vdebug("invalid entity's value\n");		\
1749			return -EINVAL;					\
1750		}							\
1751		ret = entity(FFS_ ##type, &val, _ds, priv);		\
1752		if (unlikely(ret < 0)) {				\
1753			pr_debug("entity " #type "(%02x); ret = %d\n",	\
1754				 (val), ret);				\
1755			return ret;					\
1756		}							\
1757	} while (0)
1758
1759	/* Parse descriptor depending on type. */
1760	switch (_ds->bDescriptorType) {
1761	case USB_DT_DEVICE:
1762	case USB_DT_CONFIG:
1763	case USB_DT_STRING:
1764	case USB_DT_DEVICE_QUALIFIER:
1765		/* function can't have any of those */
1766		pr_vdebug("descriptor reserved for gadget: %d\n",
1767		      _ds->bDescriptorType);
1768		return -EINVAL;
1769
1770	case USB_DT_INTERFACE: {
1771		struct usb_interface_descriptor *ds = (void *)_ds;
1772		pr_vdebug("interface descriptor\n");
1773		if (length != sizeof *ds)
1774			goto inv_length;
1775
1776		__entity(INTERFACE, ds->bInterfaceNumber);
1777		if (ds->iInterface)
1778			__entity(STRING, ds->iInterface);
1779	}
1780		break;
1781
1782	case USB_DT_ENDPOINT: {
1783		struct usb_endpoint_descriptor *ds = (void *)_ds;
1784		pr_vdebug("endpoint descriptor\n");
1785		if (length != USB_DT_ENDPOINT_SIZE &&
1786		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
1787			goto inv_length;
1788		__entity(ENDPOINT, ds->bEndpointAddress);
1789	}
1790		break;
1791
1792	case HID_DT_HID:
1793		pr_vdebug("hid descriptor\n");
1794		if (length != sizeof(struct hid_descriptor))
1795			goto inv_length;
1796		break;
1797
1798	case USB_DT_OTG:
1799		if (length != sizeof(struct usb_otg_descriptor))
1800			goto inv_length;
1801		break;
1802
1803	case USB_DT_INTERFACE_ASSOCIATION: {
1804		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
1805		pr_vdebug("interface association descriptor\n");
1806		if (length != sizeof *ds)
1807			goto inv_length;
1808		if (ds->iFunction)
1809			__entity(STRING, ds->iFunction);
1810	}
1811		break;
1812
1813	case USB_DT_SS_ENDPOINT_COMP:
1814		pr_vdebug("EP SS companion descriptor\n");
1815		if (length != sizeof(struct usb_ss_ep_comp_descriptor))
1816			goto inv_length;
1817		break;
1818
1819	case USB_DT_OTHER_SPEED_CONFIG:
1820	case USB_DT_INTERFACE_POWER:
1821	case USB_DT_DEBUG:
1822	case USB_DT_SECURITY:
1823	case USB_DT_CS_RADIO_CONTROL:
1824		/* TODO */
1825		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
1826		return -EINVAL;
1827
1828	default:
1829		/* We should never be here */
1830		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
1831		return -EINVAL;
1832
1833inv_length:
1834		pr_vdebug("invalid length: %d (descriptor %d)\n",
1835			  _ds->bLength, _ds->bDescriptorType);
1836		return -EINVAL;
1837	}
1838
1839#undef __entity
1841#undef __entity_check_INTERFACE
1842#undef __entity_check_STRING
1843#undef __entity_check_ENDPOINT
1844
1845	return length;
1846}
1847
1848static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1849				     ffs_entity_callback entity, void *priv)
1850{
1851	const unsigned _len = len;
1852	unsigned long num = 0;
1853
1854	ENTER();
1855
1856	for (;;) {
1857		int ret;
1858
1859		if (num == count)
1860			data = NULL;
1861
1862		/* Record "descriptor" entity */
1863		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
1864		if (unlikely(ret < 0)) {
1865			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
1866				 num, ret);
1867			return ret;
1868		}
1869
1870		if (!data)
1871			return _len - len;
1872
1873		ret = ffs_do_single_desc(data, len, entity, priv);
1874		if (unlikely(ret < 0)) {
1875			pr_debug("%s returns %d\n", __func__, ret);
1876			return ret;
1877		}
1878
1879		len -= ret;
1880		data += ret;
1881		++num;
1882	}
1883}
1884
1885static int __ffs_data_do_entity(enum ffs_entity_type type,
1886				u8 *valuep, struct usb_descriptor_header *desc,
1887				void *priv)
1888{
1889	struct ffs_desc_helper *helper = priv;
1890	struct usb_endpoint_descriptor *d;
1891
1892	ENTER();
1893
1894	switch (type) {
1895	case FFS_DESCRIPTOR:
1896		break;
1897
1898	case FFS_INTERFACE:
1899		/*
1900		 * Interfaces are indexed from zero so if we
1901		 * encountered interface "n" then there are at least
1902		 * "n+1" interfaces.
1903		 */
1904		if (*valuep >= helper->interfaces_count)
1905			helper->interfaces_count = *valuep + 1;
1906		break;
1907
1908	case FFS_STRING:
1909		/*
1910		 * Strings are indexed from 1 (0 is magic ;) reserved
1911		 * for languages list or some such)
1912		 */
1913		if (*valuep > helper->ffs->strings_count)
1914			helper->ffs->strings_count = *valuep;
1915		break;
1916
1917	case FFS_ENDPOINT:
1918		d = (void *)desc;
1919		helper->eps_count++;
1920		if (helper->eps_count >= 15)
1921			return -EINVAL;
1922		/* Check if descriptors for any speed were already parsed */
1923		if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
1924			helper->ffs->eps_addrmap[helper->eps_count] =
1925				d->bEndpointAddress;
1926		else if (helper->ffs->eps_addrmap[helper->eps_count] !=
1927				d->bEndpointAddress)
1928			return -EINVAL;
1929		break;
1930	}
1931
1932	return 0;
1933}
1934
1935static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
1936				   struct usb_os_desc_header *desc)
1937{
1938	u16 bcd_version = le16_to_cpu(desc->bcdVersion);
1939	u16 w_index = le16_to_cpu(desc->wIndex);
1940
1941	if (bcd_version != 1) {
1942		pr_vdebug("unsupported os descriptors version: %d",
1943			  bcd_version);
1944		return -EINVAL;
1945	}
1946	switch (w_index) {
1947	case 0x4:
1948		*next_type = FFS_OS_DESC_EXT_COMPAT;
1949		break;
1950	case 0x5:
1951		*next_type = FFS_OS_DESC_EXT_PROP;
1952		break;
1953	default:
1954		pr_vdebug("unsupported os descriptor type: %d", w_index);
1955		return -EINVAL;
1956	}
1957
1958	return sizeof(*desc);
1959}
1960
1961/*
1962 * Process all extended compatibility/extended property descriptors
1963 * of a feature descriptor
1964 */
1965static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
1966					      enum ffs_os_desc_type type,
1967					      u16 feature_count,
1968					      ffs_os_desc_callback entity,
1969					      void *priv,
1970					      struct usb_os_desc_header *h)
1971{
1972	int ret;
1973	const unsigned _len = len;
1974
1975	ENTER();
1976
1977	/* loop over all ext compat/ext prop descriptors */
1978	while (feature_count--) {
1979		ret = entity(type, h, data, len, priv);
1980		if (unlikely(ret < 0)) {
1981			pr_debug("bad OS descriptor, type: %d\n", type);
1982			return ret;
1983		}
1984		data += ret;
1985		len -= ret;
1986	}
1987	return _len - len;
1988}
1989
1990/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
1991static int __must_check ffs_do_os_descs(unsigned count,
1992					char *data, unsigned len,
1993					ffs_os_desc_callback entity, void *priv)
1994{
1995	const unsigned _len = len;
1996	unsigned long num = 0;
1997
1998	ENTER();
1999
2000	for (num = 0; num < count; ++num) {
2001		int ret;
2002		enum ffs_os_desc_type type;
2003		u16 feature_count;
2004		struct usb_os_desc_header *desc = (void *)data;
2005
2006		if (len < sizeof(*desc))
2007			return -EINVAL;
2008
2009		/*
2010		 * Record "descriptor" entity.
2011		 * Process dwLength, bcdVersion, wIndex, get b/wCount.
		 * Move the data pointer to the beginning of the extended
		 * compatibility or extended properties portion of the
		 * data.
2015		 */
2016		if (le32_to_cpu(desc->dwLength) > len)
2017			return -EINVAL;
2018
2019		ret = __ffs_do_os_desc_header(&type, desc);
2020		if (unlikely(ret < 0)) {
2021			pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
2022				 num, ret);
2023			return ret;
2024		}
		/*
		 * The ext compat header stores an 8-bit bCount followed by a
		 * zero Reserved byte where ext prop has a 16-bit wCount, so
		 * reading it as a 16-bit little-endian value works for both.
		 */
2028		feature_count = le16_to_cpu(desc->wCount);
2029		if (type == FFS_OS_DESC_EXT_COMPAT &&
2030		    (feature_count > 255 || desc->Reserved))
2031				return -EINVAL;
2032		len -= ret;
2033		data += ret;
2034
2035		/*
2036		 * Process all function/property descriptors
2037		 * of this Feature Descriptor
2038		 */
2039		ret = ffs_do_single_os_desc(data, len, type,
2040					    feature_count, entity, priv, desc);
2041		if (unlikely(ret < 0)) {
2042			pr_debug("%s returns %d\n", __func__, ret);
2043			return ret;
2044		}
2045
2046		len -= ret;
2047		data += ret;
2048	}
2049	return _len - len;
2050}
2051
/*
2053 * Validate contents of the buffer from userspace related to OS descriptors.
2054 */
2055static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2056				 struct usb_os_desc_header *h, void *data,
2057				 unsigned len, void *priv)
2058{
2059	struct ffs_data *ffs = priv;
2060	u8 length;
2061
2062	ENTER();
2063
2064	switch (type) {
2065	case FFS_OS_DESC_EXT_COMPAT: {
2066		struct usb_ext_compat_desc *d = data;
2067		int i;
2068
2069		if (len < sizeof(*d) ||
2070		    d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2071		    d->Reserved1)
2072			return -EINVAL;
2073		for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2074			if (d->Reserved2[i])
2075				return -EINVAL;
2076
2077		length = sizeof(struct usb_ext_compat_desc);
2078	}
2079		break;
2080	case FFS_OS_DESC_EXT_PROP: {
2081		struct usb_ext_prop_desc *d = data;
2082		u32 type, pdl;
2083		u16 pnl;
2084
2085		if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2086			return -EINVAL;
2087		length = le32_to_cpu(d->dwSize);
2088		type = le32_to_cpu(d->dwPropertyDataType);
2089		if (type < USB_EXT_PROP_UNICODE ||
2090		    type > USB_EXT_PROP_UNICODE_MULTI) {
2091			pr_vdebug("unsupported os descriptor property type: %d",
2092				  type);
2093			return -EINVAL;
2094		}
2095		pnl = le16_to_cpu(d->wPropertyNameLength);
2096		pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2097		if (length != 14 + pnl + pdl) {
2098			pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2099				  length, pnl, pdl, type);
2100			return -EINVAL;
2101		}
2102		++ffs->ms_os_descs_ext_prop_count;
2103		/* property name reported to the host as "WCHAR"s */
2104		ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
2105		ffs->ms_os_descs_ext_prop_data_len += pdl;
2106	}
2107		break;
2108	default:
2109		pr_vdebug("unknown descriptor: %d\n", type);
2110		return -EINVAL;
2111	}
2112	return length;
2113}
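
/*
 * For reference, the length check above follows the layout of a single
 * Extended Properties custom property section (MS OS descriptors 1.0):
 *
 *	dwSize			4 bytes
 *	dwPropertyDataType	4 bytes
 *	wPropertyNameLength	2 bytes		(pnl)
 *	bPropertyName		pnl bytes
 *	dwPropertyDataLength	4 bytes		(read at offset 10 + pnl)
 *	bPropertyData		pdl bytes
 *
 * hence dwSize must equal 14 + pnl + pdl.
 */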
2114
2115static int __ffs_data_got_descs(struct ffs_data *ffs,
2116				char *const _data, size_t len)
2117{
2118	char *data = _data, *raw_descs;
2119	unsigned os_descs_count = 0, counts[3], flags;
2120	int ret = -EINVAL, i;
2121	struct ffs_desc_helper helper;
2122
2123	ENTER();
2124
2125	if (get_unaligned_le32(data + 4) != len)
2126		goto error;
2127
2128	switch (get_unaligned_le32(data)) {
2129	case FUNCTIONFS_DESCRIPTORS_MAGIC:
2130		flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
2131		data += 8;
2132		len  -= 8;
2133		break;
2134	case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
2135		flags = get_unaligned_le32(data + 8);
2136		ffs->user_flags = flags;
2137		if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
2138			      FUNCTIONFS_HAS_HS_DESC |
2139			      FUNCTIONFS_HAS_SS_DESC |
2140			      FUNCTIONFS_HAS_MS_OS_DESC |
2141			      FUNCTIONFS_VIRTUAL_ADDR)) {
2142			ret = -ENOSYS;
2143			goto error;
2144		}
2145		data += 12;
2146		len  -= 12;
2147		break;
2148	default:
2149		goto error;
2150	}
2151
2152	/* Read fs_count, hs_count and ss_count (if present) */
2153	for (i = 0; i < 3; ++i) {
2154		if (!(flags & (1 << i))) {
2155			counts[i] = 0;
2156		} else if (len < 4) {
2157			goto error;
2158		} else {
2159			counts[i] = get_unaligned_le32(data);
2160			data += 4;
2161			len  -= 4;
2162		}
2163	}
	if (flags & (1 << i)) {
		if (len < 4)
			goto error;
		os_descs_count = get_unaligned_le32(data);
		data += 4;
		len -= 4;
	}
2169
2170	/* Read descriptors */
2171	raw_descs = data;
2172	helper.ffs = ffs;
2173	for (i = 0; i < 3; ++i) {
2174		if (!counts[i])
2175			continue;
2176		helper.interfaces_count = 0;
2177		helper.eps_count = 0;
2178		ret = ffs_do_descs(counts[i], data, len,
2179				   __ffs_data_do_entity, &helper);
2180		if (ret < 0)
2181			goto error;
2182		if (!ffs->eps_count && !ffs->interfaces_count) {
2183			ffs->eps_count = helper.eps_count;
2184			ffs->interfaces_count = helper.interfaces_count;
2185		} else {
2186			if (ffs->eps_count != helper.eps_count) {
2187				ret = -EINVAL;
2188				goto error;
2189			}
2190			if (ffs->interfaces_count != helper.interfaces_count) {
2191				ret = -EINVAL;
2192				goto error;
2193			}
2194		}
2195		data += ret;
2196		len  -= ret;
2197	}
2198	if (os_descs_count) {
2199		ret = ffs_do_os_descs(os_descs_count, data, len,
2200				      __ffs_data_do_os_desc, ffs);
2201		if (ret < 0)
2202			goto error;
2203		data += ret;
2204		len -= ret;
2205	}
2206
2207	if (raw_descs == data || len) {
2208		ret = -EINVAL;
2209		goto error;
2210	}
2211
2212	ffs->raw_descs_data	= _data;
2213	ffs->raw_descs		= raw_descs;
2214	ffs->raw_descs_length	= data - raw_descs;
2215	ffs->fs_descs_count	= counts[0];
2216	ffs->hs_descs_count	= counts[1];
2217	ffs->ss_descs_count	= counts[2];
2218	ffs->ms_os_descs_count	= os_descs_count;
2219
2220	return 0;
2221
2222error:
2223	kfree(_data);
2224	return ret;
2225}
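
/*
 * For illustration only: a rough sketch of the blob a userspace program
 * might write to ep0 in order to reach __ffs_data_got_descs() above.  The
 * header structure comes from <linux/usb/functionfs.h>; the counts, flag
 * choice and descriptor contents here are made up for the example.
 *
 *	static const struct {
 *		struct usb_functionfs_descs_head_v2 header;
 *		__le32 fs_count, hs_count;
 *		struct {
 *			struct usb_interface_descriptor intf;
 *			struct usb_endpoint_descriptor_no_audio sink;
 *			struct usb_endpoint_descriptor_no_audio source;
 *		} __attribute__((packed)) fs_descs, hs_descs;
 *	} __attribute__((packed)) blob = {
 *		.header = {
 *			.magic  = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
 *			.length = htole32(sizeof(blob)),
 *			.flags  = htole32(FUNCTIONFS_HAS_FS_DESC |
 *					  FUNCTIONFS_HAS_HS_DESC),
 *		},
 *		.fs_count = htole32(3),
 *		.hs_count = htole32(3),
 *		// .fs_descs and .hs_descs filled with real descriptors
 *	};
 *
 *	write(ep0_fd, &blob, sizeof(blob));
 *
 * The per-speed counts appear right after the header, in fs/hs/ss order,
 * only for the speeds flagged in header.flags; an os_descs count follows
 * them when FUNCTIONFS_HAS_MS_OS_DESC is set.
 */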
2226
2227static int __ffs_data_got_strings(struct ffs_data *ffs,
2228				  char *const _data, size_t len)
2229{
2230	u32 str_count, needed_count, lang_count;
2231	struct usb_gadget_strings **stringtabs, *t;
2232	struct usb_string *strings, *s;
2233	const char *data = _data;
2234
2235	ENTER();
2236
2237	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2238		     get_unaligned_le32(data + 4) != len))
2239		goto error;
2240	str_count  = get_unaligned_le32(data + 8);
2241	lang_count = get_unaligned_le32(data + 12);
2242
2243	/* if one is zero the other must be zero */
2244	if (unlikely(!str_count != !lang_count))
2245		goto error;
2246
2247	/* Do we have at least as many strings as descriptors need? */
2248	needed_count = ffs->strings_count;
2249	if (unlikely(str_count < needed_count))
2250		goto error;
2251
2252	/*
2253	 * If we don't need any strings just return and free all
2254	 * memory.
2255	 */
2256	if (!needed_count) {
2257		kfree(_data);
2258		return 0;
2259	}
2260
2261	/* Allocate everything in one chunk so there's less maintenance. */
2262	{
2263		unsigned i = 0;
2264		vla_group(d);
2265		vla_item(d, struct usb_gadget_strings *, stringtabs,
2266			lang_count + 1);
2267		vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
2268		vla_item(d, struct usb_string, strings,
2269			lang_count*(needed_count+1));
2270
2271		char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
2272
2273		if (unlikely(!vlabuf)) {
2274			kfree(_data);
2275			return -ENOMEM;
2276		}
2277
2278		/* Initialize the VLA pointers */
2279		stringtabs = vla_ptr(vlabuf, d, stringtabs);
2280		t = vla_ptr(vlabuf, d, stringtab);
2281		i = lang_count;
2282		do {
2283			*stringtabs++ = t++;
2284		} while (--i);
2285		*stringtabs = NULL;
2286
		/* stringtabs == vlabuf again (first VLA item), kept for the later kfree() */
2288		stringtabs = vla_ptr(vlabuf, d, stringtabs);
2289		t = vla_ptr(vlabuf, d, stringtab);
2290		s = vla_ptr(vlabuf, d, strings);
2291		strings = s;
2292	}
2293
2294	/* For each language */
2295	data += 16;
2296	len -= 16;
2297
2298	do { /* lang_count > 0 so we can use do-while */
2299		unsigned needed = needed_count;
2300
2301		if (unlikely(len < 3))
2302			goto error_free;
2303		t->language = get_unaligned_le16(data);
2304		t->strings  = s;
2305		++t;
2306
2307		data += 2;
2308		len -= 2;
2309
2310		/* For each string */
2311		do { /* str_count > 0 so we can use do-while */
2312			size_t length = strnlen(data, len);
2313
2314			if (unlikely(length == len))
2315				goto error_free;
2316
			/*
			 * The user may provide more strings than we
			 * need; if that's the case we simply ignore
			 * the rest.
			 */
2322			if (likely(needed)) {
2323				/*
2324				 * s->id will be set while adding
2325				 * function to configuration so for
2326				 * now just leave garbage here.
2327				 */
2328				s->s = data;
2329				--needed;
2330				++s;
2331			}
2332
2333			data += length + 1;
2334			len -= length + 1;
2335		} while (--str_count);
2336
2337		s->id = 0;   /* terminator */
2338		s->s = NULL;
2339		++s;
2340
2341	} while (--lang_count);
2342
2343	/* Some garbage left? */
2344	if (unlikely(len))
2345		goto error_free;
2346
2347	/* Done! */
2348	ffs->stringtabs = stringtabs;
2349	ffs->raw_strings = _data;
2350
2351	return 0;
2352
2353error_free:
2354	kfree(stringtabs);
2355error:
2356	kfree(_data);
2357	return -EINVAL;
2358}
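
/*
 * For illustration only: the strings blob parsed above, as userspace might
 * assemble it (the header comes from <linux/usb/functionfs.h>; the language
 * id and the string itself are made-up example values):
 *
 *	static const struct {
 *		struct usb_functionfs_strings_head header;
 *		struct {
 *			__le16 code;
 *			char str0[sizeof("Example string")];
 *		} __attribute__((packed)) lang0;
 *	} __attribute__((packed)) strings = {
 *		.header = {
 *			.magic      = htole32(FUNCTIONFS_STRINGS_MAGIC),
 *			.length     = htole32(sizeof(strings)),
 *			.str_count  = htole32(1),
 *			.lang_count = htole32(1),
 *		},
 *		.lang0 = {
 *			.code = htole16(0x0409),	// en-US
 *			.str0 = "Example string",
 *		},
 *	};
 *
 *	write(ep0_fd, &strings, sizeof(strings));
 *
 * Each language block is a 16-bit language code followed by str_count
 * NUL-terminated UTF-8 strings.
 */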
2359
2360
2361/* Events handling and management *******************************************/
2362
2363static void __ffs_event_add(struct ffs_data *ffs,
2364			    enum usb_functionfs_event_type type)
2365{
2366	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
2367	int neg = 0;
2368
	/*
	 * Abort any unhandled setup.
	 *
	 * We do not need to worry about some cmpxchg() changing
	 * ffs->setup_state behind our back without the lock held: the
	 * cmpxchg() used in several places in this file only replaces
	 * FFS_SETUP_CANCELLED, so it does nothing while the state is
	 * FFS_SETUP_PENDING.
	 */
2377	if (ffs->setup_state == FFS_SETUP_PENDING)
2378		ffs->setup_state = FFS_SETUP_CANCELLED;
2379
2380	switch (type) {
2381	case FUNCTIONFS_RESUME:
2382		rem_type2 = FUNCTIONFS_SUSPEND;
2383		/* FALL THROUGH */
2384	case FUNCTIONFS_SUSPEND:
2385	case FUNCTIONFS_SETUP:
2386		rem_type1 = type;
2387		/* Discard all similar events */
2388		break;
2389
2390	case FUNCTIONFS_BIND:
2391	case FUNCTIONFS_UNBIND:
2392	case FUNCTIONFS_DISABLE:
2393	case FUNCTIONFS_ENABLE:
		/* Discard everything other than power management. */
2395		rem_type1 = FUNCTIONFS_SUSPEND;
2396		rem_type2 = FUNCTIONFS_RESUME;
2397		neg = 1;
2398		break;
2399
2400	default:
2401		WARN(1, "%d: unknown event, this should not happen\n", type);
2402		return;
2403	}
2404
2405	{
2406		u8 *ev  = ffs->ev.types, *out = ev;
2407		unsigned n = ffs->ev.count;
2408		for (; n; --n, ++ev)
2409			if ((*ev == rem_type1 || *ev == rem_type2) == neg)
2410				*out++ = *ev;
2411			else
2412				pr_vdebug("purging event %d\n", *ev);
2413		ffs->ev.count = out - ffs->ev.types;
2414	}
2415
2416	pr_vdebug("adding event %d\n", type);
2417	ffs->ev.types[ffs->ev.count++] = type;
2418	wake_up_locked(&ffs->ev.waitq);
2419}
2420
2421static void ffs_event_add(struct ffs_data *ffs,
2422			  enum usb_functionfs_event_type type)
2423{
2424	unsigned long flags;
2425	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2426	__ffs_event_add(ffs, type);
2427	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2428}
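
/*
 * For illustration only: the events queued above are consumed by userspace
 * with plain read()s on the ep0 file, e.g. (ep0_fd being the open "ep0"
 * node of a mounted functionfs instance):
 *
 *	struct usb_functionfs_event event;
 *
 *	if (read(ep0_fd, &event, sizeof(event)) == sizeof(event)) {
 *		switch (event.type) {
 *		case FUNCTIONFS_ENABLE:
 *			// start I/O on the ep1, ep2, ... files
 *			break;
 *		case FUNCTIONFS_SETUP:
 *			// event.u.setup holds the usb_ctrlrequest
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */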
2429
2430/* Bind/unbind USB function hooks *******************************************/
2431
2432static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
2433{
2434	int i;
2435
2436	for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
2437		if (ffs->eps_addrmap[i] == endpoint_address)
2438			return i;
2439	return -ENOENT;
2440}
2441
2442static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2443				    struct usb_descriptor_header *desc,
2444				    void *priv)
2445{
2446	struct usb_endpoint_descriptor *ds = (void *)desc;
2447	struct ffs_function *func = priv;
2448	struct ffs_ep *ffs_ep;
2449	unsigned ep_desc_id;
2450	int idx;
2451	static const char *speed_names[] = { "full", "high", "super" };
2452
2453	if (type != FFS_DESCRIPTOR)
2454		return 0;
2455
2456	/*
2457	 * If ss_descriptors is not NULL, we are reading super speed
2458	 * descriptors; if hs_descriptors is not NULL, we are reading high
2459	 * speed descriptors; otherwise, we are reading full speed
2460	 * descriptors.
2461	 */
2462	if (func->function.ss_descriptors) {
2463		ep_desc_id = 2;
2464		func->function.ss_descriptors[(long)valuep] = desc;
2465	} else if (func->function.hs_descriptors) {
2466		ep_desc_id = 1;
2467		func->function.hs_descriptors[(long)valuep] = desc;
2468	} else {
2469		ep_desc_id = 0;
2470		func->function.fs_descriptors[(long)valuep]    = desc;
2471	}
2472
2473	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
2474		return 0;
2475
2476	idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
2477	if (idx < 0)
2478		return idx;
2479
2480	ffs_ep = func->eps + idx;
2481
2482	if (unlikely(ffs_ep->descs[ep_desc_id])) {
2483		pr_err("two %sspeed descriptors for EP %d\n",
2484			  speed_names[ep_desc_id],
2485			  ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2486		return -EINVAL;
2487	}
2488	ffs_ep->descs[ep_desc_id] = ds;
2489
2490	ffs_dump_mem(": Original  ep desc", ds, ds->bLength);
2491	if (ffs_ep->ep) {
2492		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
2493		if (!ds->wMaxPacketSize)
2494			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
2495	} else {
2496		struct usb_request *req;
2497		struct usb_ep *ep;
2498		u8 bEndpointAddress;
2499
		/*
		 * We back up bEndpointAddress because autoconfig overwrites
		 * it with the physical endpoint address.
		 */
2504		bEndpointAddress = ds->bEndpointAddress;
2505		pr_vdebug("autoconfig\n");
2506		ep = usb_ep_autoconfig(func->gadget, ds);
2507		if (unlikely(!ep))
2508			return -ENOTSUPP;
2509		ep->driver_data = func->eps + idx;
2510
2511		req = usb_ep_alloc_request(ep, GFP_KERNEL);
2512		if (unlikely(!req))
2513			return -ENOMEM;
2514
2515		ffs_ep->ep  = ep;
2516		ffs_ep->req = req;
2517		func->eps_revmap[ds->bEndpointAddress &
2518				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
		/*
		 * If we use virtual address mapping, we restore the
		 * original bEndpointAddress value.
		 */
2523		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
2524			ds->bEndpointAddress = bEndpointAddress;
2525	}
2526	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
2527
2528	return 0;
2529}
2530
2531static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2532				   struct usb_descriptor_header *desc,
2533				   void *priv)
2534{
2535	struct ffs_function *func = priv;
2536	unsigned idx;
2537	u8 newValue;
2538
2539	switch (type) {
2540	default:
2541	case FFS_DESCRIPTOR:
2542		/* Handled in previous pass by __ffs_func_bind_do_descs() */
2543		return 0;
2544
2545	case FFS_INTERFACE:
2546		idx = *valuep;
2547		if (func->interfaces_nums[idx] < 0) {
2548			int id = usb_interface_id(func->conf, &func->function);
2549			if (unlikely(id < 0))
2550				return id;
2551			func->interfaces_nums[idx] = id;
2552		}
2553		newValue = func->interfaces_nums[idx];
2554		break;
2555
2556	case FFS_STRING:
		/* String IDs are allocated when ffs_data is bound to cdev */
2558		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
2559		break;
2560
2561	case FFS_ENDPOINT:
		/*
		 * USB_DT_ENDPOINT descriptors are handled in
		 * __ffs_func_bind_do_descs().
		 */
2566		if (desc->bDescriptorType == USB_DT_ENDPOINT)
2567			return 0;
2568
2569		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
2570		if (unlikely(!func->eps[idx].ep))
2571			return -EINVAL;
2572
2573		{
2574			struct usb_endpoint_descriptor **descs;
2575			descs = func->eps[idx].descs;
2576			newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
2577		}
2578		break;
2579	}
2580
2581	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
2582	*valuep = newValue;
2583	return 0;
2584}
2585
2586static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
2587				      struct usb_os_desc_header *h, void *data,
2588				      unsigned len, void *priv)
2589{
2590	struct ffs_function *func = priv;
2591	u8 length = 0;
2592
2593	switch (type) {
2594	case FFS_OS_DESC_EXT_COMPAT: {
2595		struct usb_ext_compat_desc *desc = data;
2596		struct usb_os_desc_table *t;
2597
2598		t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
2599		t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
2600		memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
2601		       ARRAY_SIZE(desc->CompatibleID) +
2602		       ARRAY_SIZE(desc->SubCompatibleID));
2603		length = sizeof(*desc);
2604	}
2605		break;
2606	case FFS_OS_DESC_EXT_PROP: {
2607		struct usb_ext_prop_desc *desc = data;
2608		struct usb_os_desc_table *t;
2609		struct usb_os_desc_ext_prop *ext_prop;
2610		char *ext_prop_name;
2611		char *ext_prop_data;
2612
2613		t = &func->function.os_desc_table[h->interface];
2614		t->if_id = func->interfaces_nums[h->interface];
2615
2616		ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
2617		func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
2618
2619		ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
2620		ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
2621		ext_prop->data_len = le32_to_cpu(*(u32 *)
2622			usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
2623		length = ext_prop->name_len + ext_prop->data_len + 14;
2624
2625		ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
2626		func->ffs->ms_os_descs_ext_prop_name_avail +=
2627			ext_prop->name_len;
2628
2629		ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
2630		func->ffs->ms_os_descs_ext_prop_data_avail +=
2631			ext_prop->data_len;
2632		memcpy(ext_prop_data,
2633		       usb_ext_prop_data_ptr(data, ext_prop->name_len),
2634		       ext_prop->data_len);
2635		/* unicode data reported to the host as "WCHAR"s */
2636		switch (ext_prop->type) {
2637		case USB_EXT_PROP_UNICODE:
2638		case USB_EXT_PROP_UNICODE_ENV:
2639		case USB_EXT_PROP_UNICODE_LINK:
2640		case USB_EXT_PROP_UNICODE_MULTI:
2641			ext_prop->data_len *= 2;
2642			break;
2643		}
2644		ext_prop->data = ext_prop_data;
2645
2646		memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
2647		       ext_prop->name_len);
2648		/* property name reported to the host as "WCHAR"s */
2649		ext_prop->name_len *= 2;
2650		ext_prop->name = ext_prop_name;
2651
2652		t->os_desc->ext_prop_len +=
2653			ext_prop->name_len + ext_prop->data_len + 14;
2654		++t->os_desc->ext_prop_count;
2655		list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
2656	}
2657		break;
2658	default:
2659		pr_vdebug("unknown descriptor: %d\n", type);
2660	}
2661
2662	return length;
2663}
2664
2665static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
2666						struct usb_configuration *c)
2667{
2668	struct ffs_function *func = ffs_func_from_usb(f);
2669	struct f_fs_opts *ffs_opts =
2670		container_of(f->fi, struct f_fs_opts, func_inst);
2671	int ret;
2672
2673	ENTER();
2674
2675	/*
2676	 * Legacy gadget triggers binding in functionfs_ready_callback,
2677	 * which already uses locking; taking the same lock here would
2678	 * cause a deadlock.
2679	 *
2680	 * Configfs-enabled gadgets however do need ffs_dev_lock.
2681	 */
2682	if (!ffs_opts->no_configfs)
2683		ffs_dev_lock();
2684	ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
2685	func->ffs = ffs_opts->dev->ffs_data;
2686	if (!ffs_opts->no_configfs)
2687		ffs_dev_unlock();
2688	if (ret)
2689		return ERR_PTR(ret);
2690
2691	func->conf = c;
2692	func->gadget = c->cdev->gadget;
2693
2694	/*
2695	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
2696	 * configurations are bound in sequence with list_for_each_entry,
2697	 * in each configuration its functions are bound in sequence
2698	 * with list_for_each_entry, so we assume no race condition
2699	 * with regard to ffs_opts->bound access
2700	 */
2701	if (!ffs_opts->refcnt) {
2702		ret = functionfs_bind(func->ffs, c->cdev);
2703		if (ret)
2704			return ERR_PTR(ret);
2705	}
2706	ffs_opts->refcnt++;
2707	func->function.strings = func->ffs->stringtabs;
2708
2709	return ffs_opts;
2710}
2711
2712static int _ffs_func_bind(struct usb_configuration *c,
2713			  struct usb_function *f)
2714{
2715	struct ffs_function *func = ffs_func_from_usb(f);
2716	struct ffs_data *ffs = func->ffs;
2717
2718	const int full = !!func->ffs->fs_descs_count;
2719	const int high = gadget_is_dualspeed(func->gadget) &&
2720		func->ffs->hs_descs_count;
2721	const int super = gadget_is_superspeed(func->gadget) &&
2722		func->ffs->ss_descs_count;
2723
2724	int fs_len, hs_len, ss_len, ret, i;
2725
2726	/* Make it a single chunk, less management later on */
2727	vla_group(d);
2728	vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
2729	vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
2730		full ? ffs->fs_descs_count + 1 : 0);
2731	vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
2732		high ? ffs->hs_descs_count + 1 : 0);
2733	vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
2734		super ? ffs->ss_descs_count + 1 : 0);
2735	vla_item_with_sz(d, short, inums, ffs->interfaces_count);
2736	vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
2737			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2738	vla_item_with_sz(d, char[16], ext_compat,
2739			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2740	vla_item_with_sz(d, struct usb_os_desc, os_desc,
2741			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2742	vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
2743			 ffs->ms_os_descs_ext_prop_count);
2744	vla_item_with_sz(d, char, ext_prop_name,
2745			 ffs->ms_os_descs_ext_prop_name_len);
2746	vla_item_with_sz(d, char, ext_prop_data,
2747			 ffs->ms_os_descs_ext_prop_data_len);
2748	vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
2749	char *vlabuf;
2750
2751	ENTER();
2752
	/* Has descriptors only for speeds the gadget does not support */
2754	if (unlikely(!(full | high | super)))
2755		return -ENOTSUPP;
2756
2757	/* Allocate a single chunk, less management later on */
2758	vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
2759	if (unlikely(!vlabuf))
2760		return -ENOMEM;
2761
2762	ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
2763	ffs->ms_os_descs_ext_prop_name_avail =
2764		vla_ptr(vlabuf, d, ext_prop_name);
2765	ffs->ms_os_descs_ext_prop_data_avail =
2766		vla_ptr(vlabuf, d, ext_prop_data);
2767
2768	/* Copy descriptors  */
2769	memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
2770	       ffs->raw_descs_length);
2771
2772	memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
	/* Mark each endpoint's num as not yet assigned */
	for (ret = ffs->eps_count; ret; --ret) {
		struct ffs_ep *ptr;

		ptr = vla_ptr(vlabuf, d, eps);
		ptr[ret - 1].num = -1;
	}
2779
	/*
	 * Save pointers
	 * d_eps == vlabuf, func->eps used to kfree vlabuf later
	 */
2783	func->eps             = vla_ptr(vlabuf, d, eps);
2784	func->interfaces_nums = vla_ptr(vlabuf, d, inums);
2785
	/*
	 * Go through all the endpoint descriptors and allocate
	 * endpoints first, so that later we can rewrite the endpoint
	 * numbers without worrying that an endpoint's descriptor has
	 * not been processed yet.
	 */
2791	if (likely(full)) {
2792		func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
2793		fs_len = ffs_do_descs(ffs->fs_descs_count,
2794				      vla_ptr(vlabuf, d, raw_descs),
2795				      d_raw_descs__sz,
2796				      __ffs_func_bind_do_descs, func);
2797		if (unlikely(fs_len < 0)) {
2798			ret = fs_len;
2799			goto error;
2800		}
2801	} else {
2802		fs_len = 0;
2803	}
2804
2805	if (likely(high)) {
2806		func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
2807		hs_len = ffs_do_descs(ffs->hs_descs_count,
2808				      vla_ptr(vlabuf, d, raw_descs) + fs_len,
2809				      d_raw_descs__sz - fs_len,
2810				      __ffs_func_bind_do_descs, func);
2811		if (unlikely(hs_len < 0)) {
2812			ret = hs_len;
2813			goto error;
2814		}
2815	} else {
2816		hs_len = 0;
2817	}
2818
2819	if (likely(super)) {
2820		func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
2821		ss_len = ffs_do_descs(ffs->ss_descs_count,
2822				vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
2823				d_raw_descs__sz - fs_len - hs_len,
2824				__ffs_func_bind_do_descs, func);
2825		if (unlikely(ss_len < 0)) {
2826			ret = ss_len;
2827			goto error;
2828		}
2829	} else {
2830		ss_len = 0;
2831	}
2832
	/*
	 * Now handle interface number allocation and interface and
	 * endpoint number rewriting; this can all be done in one go.
	 */
2838	ret = ffs_do_descs(ffs->fs_descs_count +
2839			   (high ? ffs->hs_descs_count : 0) +
2840			   (super ? ffs->ss_descs_count : 0),
2841			   vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
2842			   __ffs_func_bind_do_nums, func);
2843	if (unlikely(ret < 0))
2844		goto error;
2845
2846	func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
2847	if (c->cdev->use_os_string)
2848		for (i = 0; i < ffs->interfaces_count; ++i) {
2849			struct usb_os_desc *desc;
2850
2851			desc = func->function.os_desc_table[i].os_desc =
2852				vla_ptr(vlabuf, d, os_desc) +
2853				i * sizeof(struct usb_os_desc);
2854			desc->ext_compat_id =
2855				vla_ptr(vlabuf, d, ext_compat) + i * 16;
2856			INIT_LIST_HEAD(&desc->ext_prop);
2857		}
2858	ret = ffs_do_os_descs(ffs->ms_os_descs_count,
2859			      vla_ptr(vlabuf, d, raw_descs) +
2860			      fs_len + hs_len + ss_len,
2861			      d_raw_descs__sz - fs_len - hs_len - ss_len,
2862			      __ffs_func_bind_do_os_desc, func);
2863	if (unlikely(ret < 0))
2864		goto error;
2865	func->function.os_desc_n =
2866		c->cdev->use_os_string ? ffs->interfaces_count : 0;
2867
2868	/* And we're done */
2869	ffs_event_add(ffs, FUNCTIONFS_BIND);
2870	return 0;
2871
2872error:
2873	/* XXX Do we need to release all claimed endpoints here? */
2874	return ret;
2875}
2876
2877static int ffs_func_bind(struct usb_configuration *c,
2878			 struct usb_function *f)
2879{
2880	struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
2881
2882	if (IS_ERR(ffs_opts))
2883		return PTR_ERR(ffs_opts);
2884
2885	return _ffs_func_bind(c, f);
2886}
2887
2888
2889/* Other USB function hooks *************************************************/
2890
2891static int ffs_func_set_alt(struct usb_function *f,
2892			    unsigned interface, unsigned alt)
2893{
2894	struct ffs_function *func = ffs_func_from_usb(f);
2895	struct ffs_data *ffs = func->ffs;
2896	int ret = 0, intf;
2897
2898	if (alt != (unsigned)-1) {
2899		intf = ffs_func_revmap_intf(func, interface);
2900		if (unlikely(intf < 0))
2901			return intf;
2902	}
2903
2904	if (ffs->func)
2905		ffs_func_eps_disable(ffs->func);
2906
2907	if (ffs->state != FFS_ACTIVE)
2908		return -ENODEV;
2909
2910	if (alt == (unsigned)-1) {
2911		ffs->func = NULL;
2912		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
2913		return 0;
2914	}
2915
2916	ffs->func = func;
2917	ret = ffs_func_eps_enable(func);
2918	if (likely(ret >= 0))
2919		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
2920	return ret;
2921}
2922
2923static void ffs_func_disable(struct usb_function *f)
2924{
2925	ffs_func_set_alt(f, 0, (unsigned)-1);
2926}
2927
2928static int ffs_func_setup(struct usb_function *f,
2929			  const struct usb_ctrlrequest *creq)
2930{
2931	struct ffs_function *func = ffs_func_from_usb(f);
2932	struct ffs_data *ffs = func->ffs;
2933	unsigned long flags;
2934	int ret;
2935
2936	ENTER();
2937
2938	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
2939	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
2940	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
2941	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
2942	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
2943
	/*
	 * Most requests directed at an interface go through here
	 * (notable exceptions are set/get interface), so we need to
	 * handle them.  All others are either handled by composite or
	 * passed to usb_configuration->setup() (if one is set).  We
	 * handle requests directed at an endpoint here as well (as
	 * it's straightforward), but what should be done with any
	 * other request?
	 */
2953	if (ffs->state != FFS_ACTIVE)
2954		return -ENODEV;
2955
2956	switch (creq->bRequestType & USB_RECIP_MASK) {
2957	case USB_RECIP_INTERFACE:
2958		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
2959		if (unlikely(ret < 0))
2960			return ret;
2961		break;
2962
2963	case USB_RECIP_ENDPOINT:
2964		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
2965		if (unlikely(ret < 0))
2966			return ret;
2967		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
2968			ret = func->ffs->eps_addrmap[ret];
2969		break;
2970
2971	default:
2972		return -EOPNOTSUPP;
2973	}
2974
2975	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2976	ffs->ev.setup = *creq;
2977	ffs->ev.setup.wIndex = cpu_to_le16(ret);
2978	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
2979	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2980
2981	return 0;
2982}
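
/*
 * For illustration only: once the FUNCTIONFS_SETUP event queued above is
 * read by userspace, the data stage is driven through the same ep0 file.
 * A rough sketch (bRequestType/wLength taken from the event just read,
 * reply/buf being whatever buffers the function uses):
 *
 *	if (event.u.setup.bRequestType & USB_DIR_IN)
 *		write(ep0_fd, reply, le16toh(event.u.setup.wLength));
 *	else
 *		read(ep0_fd, buf, le16toh(event.u.setup.wLength));
 *
 * Doing the I/O in the opposite direction is how a function asks for the
 * request to be stalled.
 */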
2983
2984static void ffs_func_suspend(struct usb_function *f)
2985{
2986	ENTER();
2987	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
2988}
2989
2990static void ffs_func_resume(struct usb_function *f)
2991{
2992	ENTER();
2993	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
2994}
2995
2996
2997/* Endpoint and interface numbers reverse mapping ***************************/
2998
2999static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
3000{
3001	num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
3002	return num ? num : -EDOM;
3003}
3004
3005static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
3006{
3007	short *nums = func->interfaces_nums;
3008	unsigned count = func->ffs->interfaces_count;
3009
3010	for (; count; --count, ++nums) {
3011		if (*nums >= 0 && *nums == intf)
3012			return nums - func->interfaces_nums;
3013	}
3014
3015	return -EDOM;
3016}
3017
3018
3019/* Devices management *******************************************************/
3020
3021static LIST_HEAD(ffs_devices);
3022
3023static struct ffs_dev *_ffs_do_find_dev(const char *name)
3024{
3025	struct ffs_dev *dev;
3026
3027	list_for_each_entry(dev, &ffs_devices, entry) {
3028		if (!dev->name || !name)
3029			continue;
3030		if (strcmp(dev->name, name) == 0)
3031			return dev;
3032	}
3033
3034	return NULL;
3035}
3036
3037/*
3038 * ffs_lock must be taken by the caller of this function
3039 */
3040static struct ffs_dev *_ffs_get_single_dev(void)
3041{
3042	struct ffs_dev *dev;
3043
3044	if (list_is_singular(&ffs_devices)) {
3045		dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3046		if (dev->single)
3047			return dev;
3048	}
3049
3050	return NULL;
3051}
3052
3053/*
3054 * ffs_lock must be taken by the caller of this function
3055 */
3056static struct ffs_dev *_ffs_find_dev(const char *name)
3057{
3058	struct ffs_dev *dev;
3059
3060	dev = _ffs_get_single_dev();
3061	if (dev)
3062		return dev;
3063
3064	return _ffs_do_find_dev(name);
3065}
3066
3067/* Configfs support *********************************************************/
3068
3069static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
3070{
3071	return container_of(to_config_group(item), struct f_fs_opts,
3072			    func_inst.group);
3073}
3074
3075static void ffs_attr_release(struct config_item *item)
3076{
3077	struct f_fs_opts *opts = to_ffs_opts(item);
3078
3079	usb_put_function_instance(&opts->func_inst);
3080}
3081
3082static struct configfs_item_operations ffs_item_ops = {
3083	.release	= ffs_attr_release,
3084};
3085
3086static struct config_item_type ffs_func_type = {
3087	.ct_item_ops	= &ffs_item_ops,
3088	.ct_owner	= THIS_MODULE,
3089};
3090
3091
3092/* Function registration interface ******************************************/
3093
3094static void ffs_free_inst(struct usb_function_instance *f)
3095{
3096	struct f_fs_opts *opts;
3097
3098	opts = to_f_fs_opts(f);
3099	ffs_dev_lock();
3100	_ffs_free_dev(opts->dev);
3101	ffs_dev_unlock();
3102	kfree(opts);
3103}
3104
3105#define MAX_INST_NAME_LEN	40
3106
3107static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
3108{
3109	struct f_fs_opts *opts;
3110	char *ptr;
3111	const char *tmp;
3112	int name_len, ret;
3113
3114	name_len = strlen(name) + 1;
3115	if (name_len > MAX_INST_NAME_LEN)
3116		return -ENAMETOOLONG;
3117
3118	ptr = kstrndup(name, name_len, GFP_KERNEL);
3119	if (!ptr)
3120		return -ENOMEM;
3121
3122	opts = to_f_fs_opts(fi);
3123	tmp = NULL;
3124
3125	ffs_dev_lock();
3126
3127	tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
3128	ret = _ffs_name_dev(opts->dev, ptr);
3129	if (ret) {
3130		kfree(ptr);
3131		ffs_dev_unlock();
3132		return ret;
3133	}
3134	opts->dev->name_allocated = true;
3135
3136	ffs_dev_unlock();
3137
3138	kfree(tmp);
3139
3140	return 0;
3141}
3142
3143static struct usb_function_instance *ffs_alloc_inst(void)
3144{
3145	struct f_fs_opts *opts;
3146	struct ffs_dev *dev;
3147
3148	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3149	if (!opts)
3150		return ERR_PTR(-ENOMEM);
3151
3152	opts->func_inst.set_inst_name = ffs_set_inst_name;
3153	opts->func_inst.free_func_inst = ffs_free_inst;
3154	ffs_dev_lock();
3155	dev = _ffs_alloc_dev();
3156	ffs_dev_unlock();
3157	if (IS_ERR(dev)) {
3158		kfree(opts);
3159		return ERR_CAST(dev);
3160	}
3161	opts->dev = dev;
3162	dev->opts = opts;
3163
3164	config_group_init_type_name(&opts->func_inst.group, "",
3165				    &ffs_func_type);
3166	return &opts->func_inst;
3167}
3168
3169static void ffs_free(struct usb_function *f)
3170{
3171	kfree(ffs_func_from_usb(f));
3172}
3173
3174static void ffs_func_unbind(struct usb_configuration *c,
3175			    struct usb_function *f)
3176{
3177	struct ffs_function *func = ffs_func_from_usb(f);
3178	struct ffs_data *ffs = func->ffs;
3179	struct f_fs_opts *opts =
3180		container_of(f->fi, struct f_fs_opts, func_inst);
3181	struct ffs_ep *ep = func->eps;
3182	unsigned count = ffs->eps_count;
3183	unsigned long flags;
3184
3185	ENTER();
3186	if (ffs->func == func) {
3187		ffs_func_eps_disable(func);
3188		ffs->func = NULL;
3189	}
3190
3191	if (!--opts->refcnt)
3192		functionfs_unbind(ffs);
3193
3194	/* cleanup after autoconfig */
3195	spin_lock_irqsave(&func->ffs->eps_lock, flags);
3196	do {
3197		if (ep->ep && ep->req)
3198			usb_ep_free_request(ep->ep, ep->req);
3199		ep->req = NULL;
3200		++ep;
3201	} while (--count);
3202	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3203	kfree(func->eps);
3204	func->eps = NULL;
3205	/*
3206	 * eps, descriptors and interfaces_nums are allocated in the
3207	 * same chunk so only one free is required.
3208	 */
3209	func->function.fs_descriptors = NULL;
3210	func->function.hs_descriptors = NULL;
3211	func->function.ss_descriptors = NULL;
3212	func->interfaces_nums = NULL;
3213
3214	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
3215}
3216
3217static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
3218{
3219	struct ffs_function *func;
3220
3221	ENTER();
3222
3223	func = kzalloc(sizeof(*func), GFP_KERNEL);
3224	if (unlikely(!func))
3225		return ERR_PTR(-ENOMEM);
3226
3227	func->function.name    = "Function FS Gadget";
3228
3229	func->function.bind    = ffs_func_bind;
3230	func->function.unbind  = ffs_func_unbind;
3231	func->function.set_alt = ffs_func_set_alt;
3232	func->function.disable = ffs_func_disable;
3233	func->function.setup   = ffs_func_setup;
3234	func->function.suspend = ffs_func_suspend;
3235	func->function.resume  = ffs_func_resume;
3236	func->function.free_func = ffs_free;
3237
3238	return &func->function;
3239}
3240
3241/*
3242 * ffs_lock must be taken by the caller of this function
3243 */
3244static struct ffs_dev *_ffs_alloc_dev(void)
3245{
3246	struct ffs_dev *dev;
3247	int ret;
3248
	if (_ffs_get_single_dev())
		return ERR_PTR(-EBUSY);
3251
3252	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3253	if (!dev)
3254		return ERR_PTR(-ENOMEM);
3255
3256	if (list_empty(&ffs_devices)) {
3257		ret = functionfs_init();
3258		if (ret) {
3259			kfree(dev);
3260			return ERR_PTR(ret);
3261		}
3262	}
3263
3264	list_add(&dev->entry, &ffs_devices);
3265
3266	return dev;
3267}
3268
3269/*
3270 * ffs_lock must be taken by the caller of this function
3271 * The caller is responsible for "name" being available whenever f_fs needs it
3272 */
3273static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
3274{
3275	struct ffs_dev *existing;
3276
3277	existing = _ffs_do_find_dev(name);
3278	if (existing)
3279		return -EBUSY;
3280
3281	dev->name = name;
3282
3283	return 0;
3284}
3285
3286/*
3287 * The caller is responsible for "name" being available whenever f_fs needs it
3288 */
3289int ffs_name_dev(struct ffs_dev *dev, const char *name)
3290{
3291	int ret;
3292
3293	ffs_dev_lock();
3294	ret = _ffs_name_dev(dev, name);
3295	ffs_dev_unlock();
3296
3297	return ret;
3298}
3299EXPORT_SYMBOL_GPL(ffs_name_dev);
3300
3301int ffs_single_dev(struct ffs_dev *dev)
3302{
3303	int ret;
3304
3305	ret = 0;
3306	ffs_dev_lock();
3307
3308	if (!list_is_singular(&ffs_devices))
3309		ret = -EBUSY;
3310	else
3311		dev->single = true;
3312
3313	ffs_dev_unlock();
3314	return ret;
3315}
3316EXPORT_SYMBOL_GPL(ffs_single_dev);
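
/*
 * A rough sketch of how a legacy (non-configfs) gadget such as g_ffs is
 * expected to use the two exported helpers above.  The ffs_dev fields and
 * helpers referenced exist in this file; the callback names and the
 * instance name are made up, and the exact sequence varies between kernel
 * versions:
 *
 *	struct usb_function_instance *fi = usb_get_function_instance("ffs");
 *	struct f_fs_opts *opts = to_f_fs_opts(fi);
 *
 *	opts->no_configfs = true;
 *	opts->dev->ffs_ready_callback = my_ready_callback;
 *	opts->dev->ffs_closed_callback = my_closed_callback;
 *	ffs_name_dev(opts->dev, "my_instance");
 *	// or, if only one instance will ever exist:
 *	// ffs_single_dev(opts->dev);
 */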
3317
3318/*
3319 * ffs_lock must be taken by the caller of this function
3320 */
3321static void _ffs_free_dev(struct ffs_dev *dev)
3322{
3323	list_del(&dev->entry);
3324	if (dev->name_allocated)
3325		kfree(dev->name);
3326	kfree(dev);
3327	if (list_empty(&ffs_devices))
3328		functionfs_cleanup();
3329}
3330
3331static void *ffs_acquire_dev(const char *dev_name)
3332{
3333	struct ffs_dev *ffs_dev;
3334
3335	ENTER();
3336	ffs_dev_lock();
3337
3338	ffs_dev = _ffs_find_dev(dev_name);
3339	if (!ffs_dev)
3340		ffs_dev = ERR_PTR(-ENOENT);
3341	else if (ffs_dev->mounted)
3342		ffs_dev = ERR_PTR(-EBUSY);
3343	else if (ffs_dev->ffs_acquire_dev_callback &&
3344	    ffs_dev->ffs_acquire_dev_callback(ffs_dev))
3345		ffs_dev = ERR_PTR(-ENOENT);
3346	else
3347		ffs_dev->mounted = true;
3348
3349	ffs_dev_unlock();
3350	return ffs_dev;
3351}
3352
3353static void ffs_release_dev(struct ffs_data *ffs_data)
3354{
3355	struct ffs_dev *ffs_dev;
3356
3357	ENTER();
3358	ffs_dev_lock();
3359
3360	ffs_dev = ffs_data->private_data;
3361	if (ffs_dev) {
3362		ffs_dev->mounted = false;
3363
3364		if (ffs_dev->ffs_release_dev_callback)
3365			ffs_dev->ffs_release_dev_callback(ffs_dev);
3366	}
3367
3368	ffs_dev_unlock();
3369}
3370
3371static int ffs_ready(struct ffs_data *ffs)
3372{
3373	struct ffs_dev *ffs_obj;
3374	int ret = 0;
3375
3376	ENTER();
3377	ffs_dev_lock();
3378
3379	ffs_obj = ffs->private_data;
3380	if (!ffs_obj) {
3381		ret = -EINVAL;
3382		goto done;
3383	}
3384	if (WARN_ON(ffs_obj->desc_ready)) {
3385		ret = -EBUSY;
3386		goto done;
3387	}
3388
3389	ffs_obj->desc_ready = true;
3390	ffs_obj->ffs_data = ffs;
3391
3392	if (ffs_obj->ffs_ready_callback)
3393		ret = ffs_obj->ffs_ready_callback(ffs);
3394
3395done:
3396	ffs_dev_unlock();
3397	return ret;
3398}
3399
3400static void ffs_closed(struct ffs_data *ffs)
3401{
3402	struct ffs_dev *ffs_obj;
3403
3404	ENTER();
3405	ffs_dev_lock();
3406
3407	ffs_obj = ffs->private_data;
3408	if (!ffs_obj)
3409		goto done;
3410
3411	ffs_obj->desc_ready = false;
3412
3413	if (ffs_obj->ffs_closed_callback)
3414		ffs_obj->ffs_closed_callback(ffs);
3415
3416	if (!ffs_obj->opts || ffs_obj->opts->no_configfs
3417	    || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
3418		goto done;
3419
3420	unregister_gadget_item(ffs_obj->opts->
3421			       func_inst.group.cg_item.ci_parent->ci_parent);
3422done:
3423	ffs_dev_unlock();
3424}
3425
3426/* Misc helper functions ****************************************************/
3427
3428static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
3429{
3430	return nonblock
3431		? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
3432		: mutex_lock_interruptible(mutex);
3433}
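
/*
 * Typical call site in this driver's read/write paths (sketch): honour
 * O_NONBLOCK by trying the lock instead of sleeping on it, e.g.
 *
 *	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
 *	if (ret < 0)
 *		return ret;
 */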
3434
3435static char *ffs_prepare_buffer(const char __user *buf, size_t len)
3436{
3437	char *data;
3438
3439	if (unlikely(!len))
3440		return NULL;
3441
3442	data = kmalloc(len, GFP_KERNEL);
3443	if (unlikely(!data))
3444		return ERR_PTR(-ENOMEM);
3445
3446	if (unlikely(__copy_from_user(data, buf, len))) {
3447		kfree(data);
3448		return ERR_PTR(-EFAULT);
3449	}
3450
3451	pr_vdebug("Buffer from user space:\n");
3452	ffs_dump_mem("", data, len);
3453
3454	return data;
3455}
3456
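/*
 * For illustration only: with the registration below in place, a gadget
 * (legacy or configfs-generated) obtains and wires up this function
 * through the usual composite helpers, roughly:
 *
 *	struct usb_function_instance *fi = usb_get_function_instance("ffs");
 *	struct usb_function *f = usb_get_function(fi);
 *
 *	usb_add_function(config, f);	// ends up calling ffs_func_bind()
 */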
3457DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
3458MODULE_LICENSE("GPL");
3459MODULE_AUTHOR("Michal Nazarewicz");
3460