gadget.c revision c71fc37c191747ea1f00424e84f96c1f88e52bfc
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 *    to endorse or promote products derived from this software without
20 *    specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <linux/kernel.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/interrupt.h>
46#include <linux/io.h>
47#include <linux/list.h>
48#include <linux/dma-mapping.h>
49
50#include <linux/usb/ch9.h>
51#include <linux/usb/gadget.h>
52
53#include "core.h"
54#include "gadget.h"
55#include "io.h"
56
57#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
58
59void dwc3_map_buffer_to_dma(struct dwc3_request *req)
60{
61	struct dwc3			*dwc = req->dep->dwc;
62
63	if (req->request.length == 0) {
64		/* req->request.dma = dwc->setup_buf_addr; */
65		return;
66	}
67
68	if (req->request.dma == DMA_ADDR_INVALID) {
69		req->request.dma = dma_map_single(dwc->dev, req->request.buf,
70				req->request.length, req->direction
71				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
72		req->mapped = true;
73	}
74}
75
76void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
77{
78	struct dwc3			*dwc = req->dep->dwc;
79
80	if (req->request.length == 0) {
81		req->request.dma = DMA_ADDR_INVALID;
82		return;
83	}
84
85	if (req->mapped) {
86		dma_unmap_single(dwc->dev, req->request.dma,
87				req->request.length, req->direction
88				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
89		req->mapped = 0;
90		req->request.dma = DMA_ADDR_INVALID;
91	}
92}
93
/**
 * dwc3_gadget_giveback - hand a completed/cancelled request back to the gadget
 * @dep: endpoint the request was queued on
 * @req: request being completed
 * @status: final status to report (unless one was already set)
 *
 * Removes @req from its list, unmaps its buffer and invokes the gadget
 * driver's ->complete() callback. Must be called with dwc->lock held;
 * the lock is dropped around ->complete() since the gadget driver may
 * re-queue requests from within the callback.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;

	/* only requests already started by the HW occupy a TRB slot */
	if (req->queued) {
		dep->busy_slot++;
		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			dep->busy_slot++;
	}
	list_del(&req->list);

	/* don't overwrite a status (e.g. -ECONNRESET) set by a canceller */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	dwc3_unmap_buffer_from_dma(req);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	req->request.complete(&req->dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
125
126static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
127{
128	switch (cmd) {
129	case DWC3_DEPCMD_DEPSTARTCFG:
130		return "Start New Configuration";
131	case DWC3_DEPCMD_ENDTRANSFER:
132		return "End Transfer";
133	case DWC3_DEPCMD_UPDATETRANSFER:
134		return "Update Transfer";
135	case DWC3_DEPCMD_STARTTRANSFER:
136		return "Start Transfer";
137	case DWC3_DEPCMD_CLEARSTALL:
138		return "Clear Stall";
139	case DWC3_DEPCMD_SETSTALL:
140		return "Set Stall";
141	case DWC3_DEPCMD_GETSEQNUMBER:
142		return "Get Data Sequence Number";
143	case DWC3_DEPCMD_SETTRANSFRESOURCE:
144		return "Set Endpoint Transfer Resource";
145	case DWC3_DEPCMD_SETEPCONFIG:
146		return "Set Endpoint Configuration";
147	default:
148		return "UNKNOWN command";
149	}
150}
151
152int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
153		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
154{
155	struct dwc3_ep		*dep = dwc->eps[ep];
156	u32			timeout = 500;
157	u32			reg;
158
159	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
160			dep->name,
161			dwc3_gadget_ep_cmd_string(cmd), params->param0,
162			params->param1, params->param2);
163
164	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
165	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
166	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
167
168	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
169	do {
170		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
171		if (!(reg & DWC3_DEPCMD_CMDACT)) {
172			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
173					DWC3_DEPCMD_STATUS(reg));
174			return 0;
175		}
176
177		/*
178		 * We can't sleep here, because it is also called from
179		 * interrupt context.
180		 */
181		timeout--;
182		if (!timeout)
183			return -ETIMEDOUT;
184
185		udelay(1);
186	} while (1);
187}
188
189static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
190		struct dwc3_trb_hw *trb)
191{
192	u32		offset = (char *) trb - (char *) dep->trb_pool;
193
194	return dep->trb_pool_dma + offset;
195}
196
197static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
198{
199	struct dwc3		*dwc = dep->dwc;
200
201	if (dep->trb_pool)
202		return 0;
203
204	if (dep->number == 0 || dep->number == 1)
205		return 0;
206
207	dep->trb_pool = dma_alloc_coherent(dwc->dev,
208			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
209			&dep->trb_pool_dma, GFP_KERNEL);
210	if (!dep->trb_pool) {
211		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
212				dep->name);
213		return -ENOMEM;
214	}
215
216	return 0;
217}
218
/* Free the coherent TRB ring allocated by dwc3_alloc_trb_pool(). */
static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	/* clear stale pointers so a later alloc starts from scratch */
	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}
229
230static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
231{
232	struct dwc3_gadget_ep_cmd_params params;
233	u32			cmd;
234
235	memset(&params, 0x00, sizeof(params));
236
237	if (dep->number != 1) {
238		cmd = DWC3_DEPCMD_DEPSTARTCFG;
239		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
240		if (dep->number > 1) {
241			if (dwc->start_config_issued)
242				return 0;
243			dwc->start_config_issued = true;
244			cmd |= DWC3_DEPCMD_PARAM(2);
245		}
246
247		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
248	}
249
250	return 0;
251}
252
/*
 * dwc3_gadget_set_ep_config - issue DEPCFG for one endpoint
 * @desc: USB endpoint descriptor the configuration is derived from
 *
 * Builds the two DEPCFG parameter words from the descriptor and the
 * endpoint's software state and sends the Set Endpoint Configuration
 * command.
 */
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	/* transfer type, max packet size and burst from the descriptor */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
		| DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	/* bulk endpoints advertising streams get stream events enabled */
	if (usb_endpoint_xfer_bulk(desc) && dep->endpoint.max_streams) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	/* bInterval is stored minus one; cache the interval in frames */
	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}
299
300static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
301{
302	struct dwc3_gadget_ep_cmd_params params;
303
304	memset(&params, 0x00, sizeof(params));
305
306	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
307
308	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
309			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
310}
311
/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Issues Start Configuration / Set Endpoint Configuration / Set Transfer
 * Resource as needed, enables the endpoint in DALEPENA and, for
 * isochronous endpoints, installs the Link TRB turning the TRB pool
 * into a ring buffer.
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;
	int			ret = -ENOMEM;

	/* first enable only: issue Start New Configuration */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb_hw	*trb_st_hw;
		struct dwc3_trb_hw	*trb_link_hw;
		struct dwc3_trb		trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->desc = desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		/* last pool entry points back at the first: ring buffer */
		trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
		trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
		trb_link.hwo = true;

		trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
		dwc3_trb_to_hw(&trb_link, trb_link_hw);
	}

	return 0;
}
371
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
/*
 * dwc3_remove_requests - give back every request owned by @dep
 *
 * Stops the transfer the hardware is currently processing (if any) and
 * then completes all not-yet-started requests with -ESHUTDOWN.
 */
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	/* cancel whatever the controller is actively working on */
	if (!list_empty(&dep->req_queued))
		dwc3_stop_active_transfer(dwc, dep->number);

	/* then fail every request still waiting to be started */
	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}
386
/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	/* give back everything queued on this endpoint first */
	dwc3_remove_requests(dwc, dep);

	/* remove it from the set of active endpoints */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	/* reset all software state, including DWC3_EP_ENABLED */
	dep->stream_capable = false;
	dep->desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}
413
414/* -------------------------------------------------------------------------- */
415
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	/*
	 * ep0 is enabled by this driver itself (see dwc3_gadget_start());
	 * gadget drivers may not enable it through the usb_ep API.
	 */
	return -EINVAL;
}
421
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	/* ep0 stays enabled for the controller's lifetime */
	return -EINVAL;
}
426
427/* -------------------------------------------------------------------------- */
428
429static int dwc3_gadget_ep_enable(struct usb_ep *ep,
430		const struct usb_endpoint_descriptor *desc)
431{
432	struct dwc3_ep			*dep;
433	struct dwc3			*dwc;
434	unsigned long			flags;
435	int				ret;
436
437	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
438		pr_debug("dwc3: invalid parameters\n");
439		return -EINVAL;
440	}
441
442	if (!desc->wMaxPacketSize) {
443		pr_debug("dwc3: missing wMaxPacketSize\n");
444		return -EINVAL;
445	}
446
447	dep = to_dwc3_ep(ep);
448	dwc = dep->dwc;
449
450	switch (usb_endpoint_type(desc)) {
451	case USB_ENDPOINT_XFER_CONTROL:
452		strncat(dep->name, "-control", sizeof(dep->name));
453		break;
454	case USB_ENDPOINT_XFER_ISOC:
455		strncat(dep->name, "-isoc", sizeof(dep->name));
456		break;
457	case USB_ENDPOINT_XFER_BULK:
458		strncat(dep->name, "-bulk", sizeof(dep->name));
459		break;
460	case USB_ENDPOINT_XFER_INT:
461		strncat(dep->name, "-int", sizeof(dep->name));
462		break;
463	default:
464		dev_err(dwc->dev, "invalid endpoint transfer type\n");
465	}
466
467	if (dep->flags & DWC3_EP_ENABLED) {
468		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
469				dep->name);
470		return 0;
471	}
472
473	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
474
475	spin_lock_irqsave(&dwc->lock, flags);
476	ret = __dwc3_gadget_ep_enable(dep, desc);
477	spin_unlock_irqrestore(&dwc->lock, flags);
478
479	return ret;
480}
481
/*
 * dwc3_gadget_ep_disable - usb_ep_ops.disable for non-ep0 endpoints
 *
 * Restores the bare "ep<num><dir>" name (dropping the transfer type
 * suffix added at enable time) and disables the endpoint under the lock.
 */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* disabling twice is harmless but worth a warning */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	/* physical endpoint number encodes direction in bit 0 */
	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
513
514static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
515	gfp_t gfp_flags)
516{
517	struct dwc3_request		*req;
518	struct dwc3_ep			*dep = to_dwc3_ep(ep);
519	struct dwc3			*dwc = dep->dwc;
520
521	req = kzalloc(sizeof(*req), gfp_flags);
522	if (!req) {
523		dev_err(dwc->dev, "not enough memory\n");
524		return NULL;
525	}
526
527	req->epnum	= dep->number;
528	req->dep	= dep;
529	req->request.dma = DMA_ADDR_INVALID;
530
531	return &req->request;
532}
533
/* Release a request obtained from dwc3_gadget_ep_alloc_request(). */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	kfree(to_dwc3_request(request));
}
541
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @last: nonzero if this is the last TRB of the batch (sets LST/IOC)
 *
 * Consumes one slot of @dep's TRB pool, fills it in from @req and
 * hands it to the hardware by setting HWO. Always returns 0.
 */
static int dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned last)
{
	struct dwc3_trb_hw	*trb_hw;
	struct dwc3_trb		trb;

	unsigned int		cur_slot;

	/* claim the next free slot of the power-of-two sized pool */
	trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	cur_slot = dep->free_slot;
	dep->free_slot++;

	/* Skip the LINK-TRB on ISOC */
	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->desc))
		return 0;

	/* the request is now owned by the hardware side */
	dwc3_gadget_move_request_queued(req);
	memset(&trb, 0, sizeof(trb));

	req->trb = trb_hw;

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		trb.isp_imi = true;
		trb.csp = true;
	} else {
		/* non-isoc transfers stop at the TRB carrying LST */
		trb.lst = last;
	}

	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
		trb.sid_sofn = req->request.stream_id;

	switch (usb_endpoint_type(dep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
			trb.ioc = last;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb.trbctl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	trb.length	= req->request.length;
	trb.bplh	= req->request.dma;
	/* HWO marks the TRB as hardware-owned once written out */
	trb.hwo		= true;

	dwc3_trb_to_hw(&trb, trb_hw);
	req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);

	return 0;
}
613
/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the request list and sets up TRBs for the
 * transfers. It returns once there are no more TRBs available or it
 * runs out of requests. Returns the first request handed to the
 * hardware, or NULL if nothing was prepared.
 */
static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
		bool starting)
{
	struct dwc3_request	*req, *n, *ret = NULL;
	u32			trbs_left;
	unsigned int		last_one = 0;

	/* the masked slot arithmetic below relies on this */
	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/*
	 * If busy and free slot are equal then the ring is either full or
	 * empty. If we are starting to process requests then we are empty.
	 * Otherwise we are full and don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return NULL;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit TRB_NUM/4. We try to avoid to having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
		return NULL;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		trbs_left--;

		if (!trbs_left)
			last_one = 1;

		/* Is this the last request? */
		if (list_empty(&dep->request_list))
			last_one = 1;

		/*
		 * FIXME we shouldn't need to set LST bit always but we are
		 * facing some weird problem with the Hardware where it doesn't
		 * complete even though it has been previously started.
		 *
		 * While we're debugging the problem, as a workaround to
		 * multiple TRBs handling, use only one TRB at a time.
		 */
		dwc3_prepare_one_trb(dep, req, true);
		ret = req;
		break;
	}

	return ret;
}
693
/*
 * __dwc3_gadget_kick_transfer - start or update a transfer on @dep
 * @cmd_param: DEPCMD parameter (transfer resource index for updates)
 * @start_new: nonzero to issue STARTTRANSFER, zero for UPDATETRANSFER
 *
 * Prepares TRBs from the pending request list and issues the matching
 * endpoint command. Returns 0 on success (or when nothing was pending,
 * in which case DWC3_EP_PENDING_REQUEST is set), -EBUSY if a new
 * transfer was requested while one is in flight, or the command error.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	/* can't start a new transfer while one is still active */
	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		/*
		 * req points to the first request where HWO changed
		 * from 0 to 1
		 */
		req = dwc3_prepare_trbs(dep, start_new);
	}
	if (!req) {
		/* nothing prepared; remember to kick on the next queue */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	/* TRB address is split across the first two command parameters */
	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(req->trb_dma);
	params.param1 = lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of we do now.
		 */
		dwc3_unmap_buffer_from_dma(req);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	/* cache the transfer resource index assigned by the controller */
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);
	if (!dep->res_trans_idx)
		printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__);
	return 0;
}
762
763static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
764{
765	req->request.actual	= 0;
766	req->request.status	= -EINPROGRESS;
767	req->direction		= dep->direction;
768	req->epnum		= dep->number;
769
770	/*
771	 * We only add to our list of requests now and
772	 * start consuming the list once we get XferNotReady
773	 * IRQ.
774	 *
775	 * That way, we avoid doing anything that we don't need
776	 * to do now and defer it until the point we receive a
777	 * particular token from the Host side.
778	 *
779	 * This will also avoid Host cancelling URBs due to too
780	 * many NACKs.
781	 */
782	dwc3_map_buffer_to_dma(req);
783	list_add_tail(&req->list, &dep->request_list);
784
785	/*
786	 * There is one special case: XferNotReady with
787	 * empty list of requests. We need to kick the
788	 * transfer here in that situation, otherwise
789	 * we will be NAKing forever.
790	 *
791	 * If we get XferNotReady before gadget driver
792	 * has a chance to queue a request, we will ACK
793	 * the IRQ but won't be able to receive the data
794	 * until the next request is queued. The following
795	 * code is handling exactly that.
796	 */
797	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
798		int ret;
799		int start_trans;
800
801		start_trans = 1;
802		if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
803				dep->flags & DWC3_EP_BUSY)
804			start_trans = 0;
805
806		ret =  __dwc3_gadget_kick_transfer(dep, 0, start_trans);
807		if (ret && ret != -EBUSY) {
808			struct dwc3	*dwc = dep->dwc;
809
810			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
811					dep->name);
812		}
813	};
814
815	return 0;
816}
817
818static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
819	gfp_t gfp_flags)
820{
821	struct dwc3_request		*req = to_dwc3_request(request);
822	struct dwc3_ep			*dep = to_dwc3_ep(ep);
823	struct dwc3			*dwc = dep->dwc;
824
825	unsigned long			flags;
826
827	int				ret;
828
829	if (!dep->desc) {
830		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
831				request, ep->name);
832		return -ESHUTDOWN;
833	}
834
835	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
836			request, ep->name, request->length);
837
838	spin_lock_irqsave(&dwc->lock, flags);
839	ret = __dwc3_gadget_ep_queue(dep, req);
840	spin_unlock_irqrestore(&dwc->lock, flags);
841
842	return ret;
843}
844
/*
 * dwc3_gadget_ep_dequeue - cancel a previously queued request
 *
 * Searches the not-yet-started list first; a request found there is
 * given back directly with -ECONNRESET. A request already handed to
 * the hardware causes the active transfer to be stopped instead.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	/* is the request still waiting to be started? */
	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		/* no — is the hardware already processing it? */
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out0;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
888
889int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
890{
891	struct dwc3_gadget_ep_cmd_params	params;
892	struct dwc3				*dwc = dep->dwc;
893	int					ret;
894
895	memset(&params, 0x00, sizeof(params));
896
897	if (value) {
898		if (dep->number == 0 || dep->number == 1) {
899			/*
900			 * Whenever EP0 is stalled, we will restart
901			 * the state machine, thus moving back to
902			 * Setup Phase
903			 */
904			dwc->ep0state = EP0_SETUP_PHASE;
905		}
906
907		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
908			DWC3_DEPCMD_SETSTALL, &params);
909		if (ret)
910			dev_err(dwc->dev, "failed to %s STALL on %s\n",
911					value ? "set" : "clear",
912					dep->name);
913		else
914			dep->flags |= DWC3_EP_STALL;
915	} else {
916		if (dep->flags & DWC3_EP_WEDGE)
917			return 0;
918
919		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
920			DWC3_DEPCMD_CLEARSTALL, &params);
921		if (ret)
922			dev_err(dwc->dev, "failed to %s STALL on %s\n",
923					value ? "set" : "clear",
924					dep->name);
925		else
926			dep->flags &= ~DWC3_EP_STALL;
927	}
928
929	return ret;
930}
931
932static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
933{
934	struct dwc3_ep			*dep = to_dwc3_ep(ep);
935	struct dwc3			*dwc = dep->dwc;
936
937	unsigned long			flags;
938
939	int				ret;
940
941	spin_lock_irqsave(&dwc->lock, flags);
942
943	if (usb_endpoint_xfer_isoc(dep->desc)) {
944		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
945		ret = -EINVAL;
946		goto out;
947	}
948
949	ret = __dwc3_gadget_ep_set_halt(dep, value);
950out:
951	spin_unlock_irqrestore(&dwc->lock, flags);
952
953	return ret;
954}
955
static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	/*
	 * Mark the endpoint wedged so __dwc3_gadget_ep_set_halt()
	 * refuses to clear the stall until the endpoint is disabled
	 * (disable resets dep->flags to 0).
	 * NOTE(review): dep->flags is written here without dwc->lock,
	 * unlike the other flag updates in this file — confirm this is
	 * safe or take the lock.
	 */
	dep->flags |= DWC3_EP_WEDGE;

	return dwc3_gadget_ep_set_halt(ep, 1);
}
964
965/* -------------------------------------------------------------------------- */
966
/*
 * Template descriptor for ep0. wMaxPacketSize is filled in at start
 * time (see dwc3_gadget_start()) based on the default speed.
 */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};
972
/* ep0 ops: dedicated enable/disable/queue handlers, the rest shared. */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
983
/* ops for all endpoints other than ep0 */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
994
995/* -------------------------------------------------------------------------- */
996
997static int dwc3_gadget_get_frame(struct usb_gadget *g)
998{
999	struct dwc3		*dwc = gadget_to_dwc(g);
1000	u32			reg;
1001
1002	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1003	return DWC3_DSTS_SOFFN(reg);
1004}
1005
1006static int dwc3_gadget_wakeup(struct usb_gadget *g)
1007{
1008	struct dwc3		*dwc = gadget_to_dwc(g);
1009
1010	unsigned long		timeout;
1011	unsigned long		flags;
1012
1013	u32			reg;
1014
1015	int			ret = 0;
1016
1017	u8			link_state;
1018	u8			speed;
1019
1020	spin_lock_irqsave(&dwc->lock, flags);
1021
1022	/*
1023	 * According to the Databook Remote wakeup request should
1024	 * be issued only when the device is in early suspend state.
1025	 *
1026	 * We can check that via USB Link State bits in DSTS register.
1027	 */
1028	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1029
1030	speed = reg & DWC3_DSTS_CONNECTSPD;
1031	if (speed == DWC3_DSTS_SUPERSPEED) {
1032		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1033		ret = -EINVAL;
1034		goto out;
1035	}
1036
1037	link_state = DWC3_DSTS_USBLNKST(reg);
1038
1039	switch (link_state) {
1040	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1041	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1042		break;
1043	default:
1044		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1045				link_state);
1046		ret = -EINVAL;
1047		goto out;
1048	}
1049
1050	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1051
1052	/*
1053	 * Switch link state to Recovery. In HS/FS/LS this means
1054	 * RemoteWakeup Request
1055	 */
1056	reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
1057	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1058
1059	/* wait for at least 2000us */
1060	usleep_range(2000, 2500);
1061
1062	/* write zeroes to Link Change Request */
1063	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1064	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1065
1066	/* pool until Link State change to ON */
1067	timeout = jiffies + msecs_to_jiffies(100);
1068
1069	while (!(time_after(jiffies, timeout))) {
1070		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1071
1072		/* in HS, means ON */
1073		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1074			break;
1075	}
1076
1077	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1078		dev_err(dwc->dev, "failed to send remote wakeup\n");
1079		ret = -EINVAL;
1080	}
1081
1082out:
1083	spin_unlock_irqrestore(&dwc->lock, flags);
1084
1085	return ret;
1086}
1087
static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3		*dwc = gadget_to_dwc(g);

	/* normalize to 0/1 before storing */
	dwc->is_selfpowered = !!is_selfpowered;

	return 0;
}
1097
/*
 * dwc3_gadget_run_stop - set or clear DCTL.RUN_STOP (soft connect)
 * @is_on: nonzero to start the controller, zero to stop it
 *
 * Busy-waits (up to ~500us) for DSTS.DEVCTRLHLT to reflect the new
 * state; gives up silently on timeout.
 */
static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32			reg;
	u32			timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on)
		reg |= DWC3_DCTL_RUN_STOP;
	else
		reg &= ~DWC3_DCTL_RUN_STOP;

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			/* halt bit clears once the controller is running */
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			/* halt bit sets once the controller has stopped */
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			break;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");
}
1131
1132static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1133{
1134	struct dwc3		*dwc = gadget_to_dwc(g);
1135	unsigned long		flags;
1136
1137	is_on = !!is_on;
1138
1139	spin_lock_irqsave(&dwc->lock, flags);
1140	dwc3_gadget_run_stop(dwc, is_on);
1141	spin_unlock_irqrestore(&dwc->lock, flags);
1142
1143	return 0;
1144}
1145
1146static int dwc3_gadget_start(struct usb_gadget *g,
1147		struct usb_gadget_driver *driver)
1148{
1149	struct dwc3		*dwc = gadget_to_dwc(g);
1150	struct dwc3_ep		*dep;
1151	unsigned long		flags;
1152	int			ret = 0;
1153	u32			reg;
1154
1155	spin_lock_irqsave(&dwc->lock, flags);
1156
1157	if (dwc->gadget_driver) {
1158		dev_err(dwc->dev, "%s is already bound to %s\n",
1159				dwc->gadget.name,
1160				dwc->gadget_driver->driver.name);
1161		ret = -EBUSY;
1162		goto err0;
1163	}
1164
1165	dwc->gadget_driver	= driver;
1166	dwc->gadget.dev.driver	= &driver->driver;
1167
1168	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1169
1170	reg &= ~DWC3_GCTL_SCALEDOWN(3);
1171	reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
1172	reg &= ~DWC3_GCTL_DISSCRAMBLE;
1173	reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);
1174
1175	switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams0)) {
1176	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
1177		reg &= ~DWC3_GCTL_DSBLCLKGTNG;
1178		break;
1179	default:
1180		dev_dbg(dwc->dev, "No power optimization available\n");
1181	}
1182
1183	/*
1184	 * WORKAROUND: DWC3 revisions <1.90a have a bug
1185	 * when The device fails to connect at SuperSpeed
1186	 * and falls back to high-speed mode which causes
1187	 * the device to enter in a Connect/Disconnect loop
1188	 */
1189	if (dwc->revision < DWC3_REVISION_190A)
1190		reg |= DWC3_GCTL_U2RSTECN;
1191
1192	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1193
1194	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1195	reg &= ~(DWC3_DCFG_SPEED_MASK);
1196	reg |= DWC3_DCFG_SUPERSPEED;
1197	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1198
1199	dwc->start_config_issued = false;
1200
1201	/* Start with SuperSpeed Default */
1202	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1203
1204	dep = dwc->eps[0];
1205	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1206	if (ret) {
1207		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1208		goto err0;
1209	}
1210
1211	dep = dwc->eps[1];
1212	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1213	if (ret) {
1214		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1215		goto err1;
1216	}
1217
1218	/* begin to receive SETUP packets */
1219	dwc->ep0state = EP0_SETUP_PHASE;
1220	dwc3_ep0_out_start(dwc);
1221
1222	spin_unlock_irqrestore(&dwc->lock, flags);
1223
1224	return 0;
1225
1226err1:
1227	__dwc3_gadget_ep_disable(dwc->eps[0]);
1228
1229err0:
1230	spin_unlock_irqrestore(&dwc->lock, flags);
1231
1232	return ret;
1233}
1234
1235static int dwc3_gadget_stop(struct usb_gadget *g,
1236		struct usb_gadget_driver *driver)
1237{
1238	struct dwc3		*dwc = gadget_to_dwc(g);
1239	unsigned long		flags;
1240
1241	spin_lock_irqsave(&dwc->lock, flags);
1242
1243	__dwc3_gadget_ep_disable(dwc->eps[0]);
1244	__dwc3_gadget_ep_disable(dwc->eps[1]);
1245
1246	dwc->gadget_driver	= NULL;
1247	dwc->gadget.dev.driver	= NULL;
1248
1249	spin_unlock_irqrestore(&dwc->lock, flags);
1250
1251	return 0;
1252}
/* gadget framework operations implemented by this UDC */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};
1261
1262/* -------------------------------------------------------------------------- */
1263
1264static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1265{
1266	struct dwc3_ep			*dep;
1267	u8				epnum;
1268
1269	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1270
1271	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1272		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1273		if (!dep) {
1274			dev_err(dwc->dev, "can't allocate endpoint %d\n",
1275					epnum);
1276			return -ENOMEM;
1277		}
1278
1279		dep->dwc = dwc;
1280		dep->number = epnum;
1281		dwc->eps[epnum] = dep;
1282
1283		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1284				(epnum & 1) ? "in" : "out");
1285		dep->endpoint.name = dep->name;
1286		dep->direction = (epnum & 1);
1287
1288		if (epnum == 0 || epnum == 1) {
1289			dep->endpoint.maxpacket = 512;
1290			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1291			if (!epnum)
1292				dwc->gadget.ep0 = &dep->endpoint;
1293		} else {
1294			int		ret;
1295
1296			dep->endpoint.maxpacket = 1024;
1297			dep->endpoint.max_streams = 15;
1298			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1299			list_add_tail(&dep->endpoint.ep_list,
1300					&dwc->gadget.ep_list);
1301
1302			ret = dwc3_alloc_trb_pool(dep);
1303			if (ret) {
1304				dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
1305				return ret;
1306			}
1307		}
1308		INIT_LIST_HEAD(&dep->request_list);
1309		INIT_LIST_HEAD(&dep->req_queued);
1310	}
1311
1312	return 0;
1313}
1314
1315static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1316{
1317	struct dwc3_ep			*dep;
1318	u8				epnum;
1319
1320	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1321		dep = dwc->eps[epnum];
1322		dwc3_free_trb_pool(dep);
1323
1324		if (epnum != 0 && epnum != 1)
1325			list_del(&dep->endpoint.ep_list);
1326
1327		kfree(dep);
1328	}
1329}
1330
/* device release callback for the gadget device: nothing to free, just trace */
static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}
1335
1336/* -------------------------------------------------------------------------- */
1337static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1338		const struct dwc3_event_depevt *event, int status)
1339{
1340	struct dwc3_request	*req;
1341	struct dwc3_trb         trb;
1342	unsigned int		count;
1343	unsigned int		s_pkt = 0;
1344
1345	do {
1346		req = next_request(&dep->req_queued);
1347		if (!req)
1348			break;
1349
1350		dwc3_trb_to_nat(req->trb, &trb);
1351
1352		if (trb.hwo && status != -ESHUTDOWN)
1353			/*
1354			 * We continue despite the error. There is not much we
1355			 * can do. If we don't clean in up we loop for ever. If
1356			 * we skip the TRB than it gets overwritten reused after
1357			 * a while since we use them in a ring buffer. a BUG()
1358			 * would help. Lets hope that if this occures, someone
1359			 * fixes the root cause instead of looking away :)
1360			 */
1361			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1362					dep->name, req->trb);
1363		count = trb.length;
1364
1365		if (dep->direction) {
1366			if (count) {
1367				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1368						dep->name);
1369				status = -ECONNRESET;
1370			}
1371		} else {
1372			if (count && (event->status & DEPEVT_STATUS_SHORT))
1373				s_pkt = 1;
1374		}
1375
1376		/*
1377		 * We assume here we will always receive the entire data block
1378		 * which we should receive. Meaning, if we program RX to
1379		 * receive 4K but we receive only 2K, we assume that's all we
1380		 * should receive and we simply bounce the request back to the
1381		 * gadget driver for further processing.
1382		 */
1383		req->request.actual += req->request.length - count;
1384		dwc3_gadget_giveback(dep, req, status);
1385		if (s_pkt)
1386			break;
1387		if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
1388			break;
1389		if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1390			break;
1391	} while (1);
1392
1393	if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1394		return 0;
1395	return 1;
1396}
1397
1398static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1399		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1400		int start_new)
1401{
1402	unsigned		status = 0;
1403	int			clean_busy;
1404
1405	if (event->status & DEPEVT_STATUS_BUSERR)
1406		status = -ECONNRESET;
1407
1408	clean_busy =  dwc3_cleanup_done_reqs(dwc, dep, event, status);
1409	if (clean_busy) {
1410		dep->flags &= ~DWC3_EP_BUSY;
1411		dep->res_trans_idx = 0;
1412	}
1413}
1414
1415static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1416		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1417{
1418	u32 uf;
1419
1420	if (list_empty(&dep->request_list)) {
1421		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1422			dep->name);
1423		return;
1424	}
1425
1426	if (event->parameters) {
1427		u32 mask;
1428
1429		mask = ~(dep->interval - 1);
1430		uf = event->parameters & mask;
1431		/* 4 micro frames in the future */
1432		uf += dep->interval * 4;
1433	} else {
1434		uf = 0;
1435	}
1436
1437	__dwc3_gadget_kick_transfer(dep, uf, 1);
1438}
1439
1440static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
1441		const struct dwc3_event_depevt *event)
1442{
1443	struct dwc3 *dwc = dep->dwc;
1444	struct dwc3_event_depevt mod_ev = *event;
1445
1446	/*
1447	 * We were asked to remove one requests. It is possible that this
1448	 * request and a few other were started together and have the same
1449	 * transfer index. Since we stopped the complete endpoint we don't
1450	 * know how many requests were already completed (and not yet)
1451	 * reported and how could be done (later). We purge them all until
1452	 * the end of the list.
1453	 */
1454	mod_ev.status = DEPEVT_STATUS_LST;
1455	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
1456	dep->flags &= ~DWC3_EP_BUSY;
1457	/* pending requets are ignored and are queued on XferNotReady */
1458}
1459
1460static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1461		const struct dwc3_event_depevt *event)
1462{
1463	u32 param = event->parameters;
1464	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1465
1466	switch (cmd_type) {
1467	case DWC3_DEPCMD_ENDTRANSFER:
1468		dwc3_process_ep_cmd_complete(dep, event);
1469		break;
1470	case DWC3_DEPCMD_STARTTRANSFER:
1471		dep->res_trans_idx = param & 0x7f;
1472		break;
1473	default:
1474		printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
1475				__func__, cmd_type);
1476		break;
1477	};
1478}
1479
/*
 * dwc3_endpoint_interrupt - demultiplex one endpoint event
 * @dwc: pointer to our controller context structure
 * @event: the endpoint event to handle
 *
 * Control endpoint (physical eps 0/1) events go to the ep0 state
 * machine; everything else is dispatched on the event type.
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	/* physical endpoints 0 and 1 form the control endpoint */
	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		/* XferComplete is not expected on isochronous endpoints */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		/* XferInProgress is only expected on isochronous endpoints */
		if (!usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status
					? "Transfer Active"
					: "Transfer Not Active");

			/* try to start whatever has been queued meanwhile */
			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		/* only bulk endpoints carry stream events */
		if (!usb_endpoint_xfer_bulk(dep->desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_ep_cmd_compl(dep, event);
		break;
	}
}
1562
1563static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1564{
1565	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1566		spin_unlock(&dwc->lock);
1567		dwc->gadget_driver->disconnect(&dwc->gadget);
1568		spin_lock(&dwc->lock);
1569	}
1570}
1571
1572static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1573{
1574	struct dwc3_ep *dep;
1575	struct dwc3_gadget_ep_cmd_params params;
1576	u32 cmd;
1577	int ret;
1578
1579	dep = dwc->eps[epnum];
1580
1581	WARN_ON(!dep->res_trans_idx);
1582	if (dep->res_trans_idx) {
1583		cmd = DWC3_DEPCMD_ENDTRANSFER;
1584		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1585		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1586		memset(&params, 0, sizeof(params));
1587		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1588		WARN_ON_ONCE(ret);
1589		dep->res_trans_idx = 0;
1590	}
1591}
1592
1593static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1594{
1595	u32 epnum;
1596
1597	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1598		struct dwc3_ep *dep;
1599
1600		dep = dwc->eps[epnum];
1601		if (!(dep->flags & DWC3_EP_ENABLED))
1602			continue;
1603
1604		dwc3_remove_requests(dwc, dep);
1605	}
1606}
1607
1608static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1609{
1610	u32 epnum;
1611
1612	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1613		struct dwc3_ep *dep;
1614		struct dwc3_gadget_ep_cmd_params params;
1615		int ret;
1616
1617		dep = dwc->eps[epnum];
1618
1619		if (!(dep->flags & DWC3_EP_STALL))
1620			continue;
1621
1622		dep->flags &= ~DWC3_EP_STALL;
1623
1624		memset(&params, 0, sizeof(params));
1625		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1626				DWC3_DEPCMD_CLEARSTALL, &params);
1627		WARN_ON_ONCE(ret);
1628	}
1629}
1630
/*
 * dwc3_gadget_disconnect_interrupt - handle a Disconnect device event
 * @dwc: pointer to our controller context structure
 *
 * Stops all active transfers, notifies the gadget driver and marks the
 * link speed unknown until the next connect.
 */
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);
#if 0
	XXX
	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
	enable it before we can disable it.

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
#endif

	dwc3_stop_active_transfers(dwc);
	dwc3_disconnect_gadget(dwc);
	/* presumably forces a fresh start-config on reconnect — verify */
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
}
1653
1654static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1655{
1656	u32			reg;
1657
1658	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1659
1660	if (on)
1661		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1662	else
1663		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1664
1665	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1666}
1667
1668static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1669{
1670	u32			reg;
1671
1672	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1673
1674	if (on)
1675		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1676	else
1677		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1678
1679	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1680}
1681
/*
 * dwc3_gadget_reset_interrupt - handle a USB Reset device event
 * @dwc: pointer to our controller context structure
 *
 * Re-enables both PHYs, tells the gadget driver about the (implicit)
 * disconnect if we were configured, clears test mode, flushes all
 * transfers/stalls and resets the device address to zero.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/* Enable PHYs */
	dwc3_gadget_usb2_phy_power(dwc, true);
	dwc3_gadget_usb3_phy_power(dwc, true);

	/* a reset while connected acts like a disconnect to the driver */
	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	/* clear any test mode bits that may have been set */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
1708
1709static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1710{
1711	u32 reg;
1712	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1713
1714	/*
1715	 * We change the clock only at SS but I dunno why I would want to do
1716	 * this. Maybe it becomes part of the power saving plan.
1717	 */
1718
1719	if (speed != DWC3_DSTS_SUPERSPEED)
1720		return;
1721
1722	/*
1723	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
1724	 * each time on Connect Done.
1725	 */
1726	if (!usb30_clock)
1727		return;
1728
1729	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1730	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
1731	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1732}
1733
1734static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
1735{
1736	switch (speed) {
1737	case USB_SPEED_SUPER:
1738		dwc3_gadget_usb2_phy_power(dwc, false);
1739		break;
1740	case USB_SPEED_HIGH:
1741	case USB_SPEED_FULL:
1742	case USB_SPEED_LOW:
1743		dwc3_gadget_usb3_phy_power(dwc, false);
1744		break;
1745	}
1746}
1747
/*
 * dwc3_gadget_conndone_interrupt - handle a Connect Done device event
 * @dwc: pointer to our controller context structure
 *
 * Reads the negotiated speed from DSTS, sets ep0's max packet size to
 * match, powers off the unused PHY and re-enables both halves of the
 * control endpoint.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_ep		*dep;
	int			ret;
	u32			reg;
	u8			speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	memset(&params, 0x00, sizeof(params));

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	/* program ep0's max packet size per the negotiated speed */
	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Disable unneeded PHY */
	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
1815
1816static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
1817{
1818	dev_vdbg(dwc->dev, "%s\n", __func__);
1819
1820	/*
1821	 * TODO take core out of low power mode when that's
1822	 * implemented.
1823	 */
1824
1825	dwc->gadget_driver->resume(&dwc->gadget);
1826}
1827
/*
 * dwc3_gadget_linksts_change_interrupt - record the new link state
 * @dwc: pointer to our controller context structure
 * @evtinfo: the event's information field holding the link state
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	/* The fifth bit says SuperSpeed yes or no. */
	dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
1836
/*
 * dwc3_gadget_interrupt - dispatch one device-specific event
 * @dwc: pointer to our controller context structure
 * @event: the device event to handle
 *
 * Disconnect, Reset, Connect Done, Wakeup and Link Status Change get
 * real handlers; the remaining event types are only logged.
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
1875
1876static void dwc3_process_event_entry(struct dwc3 *dwc,
1877		const union dwc3_event *event)
1878{
1879	/* Endpoint IRQ, handle it and return early */
1880	if (event->type.is_devspec == 0) {
1881		/* depevt */
1882		return dwc3_endpoint_interrupt(dwc, &event->depevt);
1883	}
1884
1885	switch (event->type.type) {
1886	case DWC3_EVENT_TYPE_DEV:
1887		dwc3_gadget_interrupt(dwc, &event->devt);
1888		break;
1889	/* REVISIT what to do with Carkit and I2C events ? */
1890	default:
1891		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
1892	}
1893}
1894
/*
 * dwc3_process_event_buf - drain one hardware event buffer
 * @dwc: pointer to our controller context structure
 * @buf: index of the event buffer to process
 *
 * Returns IRQ_HANDLED if at least one event was consumed, IRQ_NONE if
 * the buffer was empty.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	int left;
	u32 count;

	/* number of valid event bytes currently in this buffer */
	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt = dwc->ev_buffs[buf];
	left = count;

	while (left > 0) {
		union dwc3_event event;

		memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
		dwc3_process_event_entry(dwc, &event);
		/*
		 * XXX we wrap around correctly to the next entry as almost all
		 * entries are 4 bytes in size. There is one entry which has 12
		 * bytes which is a regular entry followed by 8 bytes data. ATM
		 * I don't know how things are organized if we get next to the
		 * a boundary so I worry about that once we try to handle that.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		/* ack 4 consumed bytes so the core can reuse the slot */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	return IRQ_HANDLED;
}
1929
1930static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
1931{
1932	struct dwc3			*dwc = _dwc;
1933	int				i;
1934	irqreturn_t			ret = IRQ_NONE;
1935
1936	spin_lock(&dwc->lock);
1937
1938	for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
1939		irqreturn_t status;
1940
1941		status = dwc3_process_event_buf(dwc, i);
1942		if (status == IRQ_HANDLED)
1943			ret = status;
1944	}
1945
1946	spin_unlock(&dwc->lock);
1947
1948	return ret;
1949}
1950
1951/**
1952 * dwc3_gadget_init - Initializes gadget related registers
1953 * @dwc: Pointer to out controller context structure
1954 *
1955 * Returns 0 on success otherwise negative errno.
1956 */
1957int __devinit dwc3_gadget_init(struct dwc3 *dwc)
1958{
1959	u32					reg;
1960	int					ret;
1961	int					irq;
1962
1963	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
1964			&dwc->ctrl_req_addr, GFP_KERNEL);
1965	if (!dwc->ctrl_req) {
1966		dev_err(dwc->dev, "failed to allocate ctrl request\n");
1967		ret = -ENOMEM;
1968		goto err0;
1969	}
1970
1971	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
1972			&dwc->ep0_trb_addr, GFP_KERNEL);
1973	if (!dwc->ep0_trb) {
1974		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
1975		ret = -ENOMEM;
1976		goto err1;
1977	}
1978
1979	dwc->setup_buf = dma_alloc_coherent(dwc->dev,
1980			sizeof(*dwc->setup_buf) * 2,
1981			&dwc->setup_buf_addr, GFP_KERNEL);
1982	if (!dwc->setup_buf) {
1983		dev_err(dwc->dev, "failed to allocate setup buffer\n");
1984		ret = -ENOMEM;
1985		goto err2;
1986	}
1987
1988	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
1989			512, &dwc->ep0_bounce_addr, GFP_KERNEL);
1990	if (!dwc->ep0_bounce) {
1991		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
1992		ret = -ENOMEM;
1993		goto err3;
1994	}
1995
1996	dev_set_name(&dwc->gadget.dev, "gadget");
1997
1998	dwc->gadget.ops			= &dwc3_gadget_ops;
1999	dwc->gadget.max_speed		= USB_SPEED_SUPER;
2000	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2001	dwc->gadget.dev.parent		= dwc->dev;
2002
2003	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2004
2005	dwc->gadget.dev.dma_parms	= dwc->dev->dma_parms;
2006	dwc->gadget.dev.dma_mask	= dwc->dev->dma_mask;
2007	dwc->gadget.dev.release		= dwc3_gadget_release;
2008	dwc->gadget.name		= "dwc3-gadget";
2009
2010	/*
2011	 * REVISIT: Here we should clear all pending IRQs to be
2012	 * sure we're starting from a well known location.
2013	 */
2014
2015	ret = dwc3_gadget_init_endpoints(dwc);
2016	if (ret)
2017		goto err4;
2018
2019	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2020
2021	ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2022			"dwc3", dwc);
2023	if (ret) {
2024		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2025				irq, ret);
2026		goto err5;
2027	}
2028
2029	/* Enable all but Start and End of Frame IRQs */
2030	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2031			DWC3_DEVTEN_EVNTOVERFLOWEN |
2032			DWC3_DEVTEN_CMDCMPLTEN |
2033			DWC3_DEVTEN_ERRTICERREN |
2034			DWC3_DEVTEN_WKUPEVTEN |
2035			DWC3_DEVTEN_ULSTCNGEN |
2036			DWC3_DEVTEN_CONNECTDONEEN |
2037			DWC3_DEVTEN_USBRSTEN |
2038			DWC3_DEVTEN_DISCONNEVTEN);
2039	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2040
2041	ret = device_register(&dwc->gadget.dev);
2042	if (ret) {
2043		dev_err(dwc->dev, "failed to register gadget device\n");
2044		put_device(&dwc->gadget.dev);
2045		goto err6;
2046	}
2047
2048	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2049	if (ret) {
2050		dev_err(dwc->dev, "failed to register udc\n");
2051		goto err7;
2052	}
2053
2054	return 0;
2055
2056err7:
2057	device_unregister(&dwc->gadget.dev);
2058
2059err6:
2060	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2061	free_irq(irq, dwc);
2062
2063err5:
2064	dwc3_gadget_free_endpoints(dwc);
2065
2066err4:
2067	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2068			dwc->ep0_bounce_addr);
2069
2070err3:
2071	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2072			dwc->setup_buf, dwc->setup_buf_addr);
2073
2074err2:
2075	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2076			dwc->ep0_trb, dwc->ep0_trb_addr);
2077
2078err1:
2079	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2080			dwc->ctrl_req, dwc->ctrl_req_addr);
2081
2082err0:
2083	return ret;
2084}
2085
/*
 * dwc3_gadget_exit - reverse of dwc3_gadget_init()
 * @dwc: pointer to our controller context structure
 *
 * Unregisters the UDC, masks and frees the IRQ, disables all endpoints
 * and releases every DMA buffer allocated by dwc3_gadget_init().
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	int			irq;
	int			i;

	usb_del_gadget_udc(&dwc->gadget);
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	/* mask all device events before releasing the handler */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

	for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
		__dwc3_gadget_ep_disable(dwc->eps[i]);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

	device_unregister(&dwc->gadget.dev);
}
2116