gadget.c revision 019ac83252dc2b356cb0ca81c25a077ec90309e7
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 * All rights reserved.
6 *
7 * Authors: Felipe Balbi <balbi@ti.com>,
8 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions, and the following disclaimer,
15 *    without modification.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. The names of the above-listed copyright holders may not be used
20 *    to endorse or promote products derived from this software without
21 *    specific prior written permission.
22 *
23 * ALTERNATIVELY, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") version 2, as published by the Free
25 * Software Foundation.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#include <linux/kernel.h>
41#include <linux/delay.h>
42#include <linux/slab.h>
43#include <linux/spinlock.h>
44#include <linux/platform_device.h>
45#include <linux/pm_runtime.h>
46#include <linux/interrupt.h>
47#include <linux/io.h>
48#include <linux/list.h>
49#include <linux/dma-mapping.h>
50
51#include <linux/usb/ch9.h>
52#include <linux/usb/gadget.h>
53
54#include "core.h"
55#include "gadget.h"
56#include "io.h"
57
58#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
59
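/**
 * dwc3_map_buffer_to_dma - map a request's buffer for DMA
 * @req: the request whose buffer should be mapped
 *
 * Maps req->request.buf with dma_map_single() unless the gadget driver
 * already provided a DMA address (i.e. req->request.dma is not
 * DMA_ADDR_INVALID). Zero-length requests need no buffer and are left
 * untouched. dwc3_unmap_buffer_from_dma() below undoes the mapping once
 * the request completes.
 */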
60void dwc3_map_buffer_to_dma(struct dwc3_request *req)
61{
62	struct dwc3			*dwc = req->dep->dwc;
63
64	if (req->request.length == 0) {
65		/* req->request.dma = dwc->setup_buf_addr; */
66		return;
67	}
68
69	if (req->request.dma == DMA_ADDR_INVALID) {
70		req->request.dma = dma_map_single(dwc->dev, req->request.buf,
71				req->request.length, req->direction
72				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
73		req->mapped = true;
74	}
75}
76
77void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
78{
79	struct dwc3			*dwc = req->dep->dwc;
80
81	if (req->request.length == 0) {
82		req->request.dma = DMA_ADDR_INVALID;
83		return;
84	}
85
86	if (req->mapped) {
87		dma_unmap_single(dwc->dev, req->request.dma,
88				req->request.length, req->direction
89				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
90		req->mapped = false;
91		req->request.dma = DMA_ADDR_INVALID;
92	}
93}
94
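/**
 * dwc3_gadget_giveback - conclude a request and call its ->complete()
 * @dep: the endpoint the request was queued to
 * @req: the request being given back
 * @status: completion status propagated to the gadget driver
 *
 * Must be called with dwc->lock held. For requests which were already
 * queued to the hardware, the busy_slot index is advanced (skipping the
 * ISOC link TRB slot). The lock is dropped around the ->complete()
 * callback so the gadget driver may queue new requests from it.
 */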
95void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
96		int status)
97{
98	struct dwc3			*dwc = dep->dwc;
99
100	if (req->queued) {
101		dep->busy_slot++;
102		/*
103		 * Skip LINK TRB. We can't use req->trb and check for
104		 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we just
105		 * completed (not the LINK TRB).
106		 */
107		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
108				usb_endpoint_xfer_isoc(dep->desc))
109			dep->busy_slot++;
110	}
111	list_del(&req->list);
112
113	if (req->request.status == -EINPROGRESS)
114		req->request.status = status;
115
116	dwc3_unmap_buffer_from_dma(req);
117
118	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
119			req, dep->name, req->request.actual,
120			req->request.length, status);
121
122	spin_unlock(&dwc->lock);
123	req->request.complete(&req->dep->endpoint, &req->request);
124	spin_lock(&dwc->lock);
125}
126
127static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
128{
129	switch (cmd) {
130	case DWC3_DEPCMD_DEPSTARTCFG:
131		return "Start New Configuration";
132	case DWC3_DEPCMD_ENDTRANSFER:
133		return "End Transfer";
134	case DWC3_DEPCMD_UPDATETRANSFER:
135		return "Update Transfer";
136	case DWC3_DEPCMD_STARTTRANSFER:
137		return "Start Transfer";
138	case DWC3_DEPCMD_CLEARSTALL:
139		return "Clear Stall";
140	case DWC3_DEPCMD_SETSTALL:
141		return "Set Stall";
142	case DWC3_DEPCMD_GETSEQNUMBER:
143		return "Get Data Sequence Number";
144	case DWC3_DEPCMD_SETTRANSFRESOURCE:
145		return "Set Endpoint Transfer Resource";
146	case DWC3_DEPCMD_SETEPCONFIG:
147		return "Set Endpoint Configuration";
148	default:
149		return "UNKNOWN command";
150	}
151}
152
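/**
 * dwc3_send_gadget_ep_cmd - issue a Device Endpoint Command
 * @dwc: pointer to our controller context structure
 * @ep: physical endpoint number the command is issued to
 * @cmd: DWC3_DEPCMD_* command, possibly OR'ed with command parameters
 * @params: values written to DEPCMDPAR0..2 before the command is kicked
 *
 * Busy-waits (udelay(1) steps, up to roughly 500us, since we may be in
 * interrupt context) for the controller to clear the CMDACT bit.
 * Returns 0 on completion or -ETIMEDOUT if the command never finishes.
 */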
153int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
154		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
155{
156	struct dwc3_ep		*dep = dwc->eps[ep];
157	u32			timeout = 500;
158	u32			reg;
159
160	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
161			dep->name,
162			dwc3_gadget_ep_cmd_string(cmd), params->param0.raw,
163			params->param1.raw, params->param2.raw);
164
165	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0.raw);
166	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1.raw);
167	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2.raw);
168
169	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
170	do {
171		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
172		if (!(reg & DWC3_DEPCMD_CMDACT)) {
173			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
174					DWC3_DEPCMD_STATUS(reg));
175			return 0;
176		}
177
178		/*
179		 * We can't sleep here, because it is also called from
180		 * interrupt context.
181		 */
182		timeout--;
183		if (!timeout)
184			return -ETIMEDOUT;
185
186		udelay(1);
187	} while (1);
188}
189
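/*
 * Translate a TRB pointer inside @dep's pool into the bus address the
 * controller expects, i.e. the pool's DMA base plus the TRB's byte offset
 * from the start of the pool.
 */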
190static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
191		struct dwc3_trb_hw *trb)
192{
193	u32		offset = (char *) trb - (char *) dep->trb_pool;
194
195	return dep->trb_pool_dma + offset;
196}
197
198static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
199{
200	struct dwc3		*dwc = dep->dwc;
201
202	if (dep->trb_pool)
203		return 0;
204
205	if (dep->number == 0 || dep->number == 1)
206		return 0;
207
208	dep->trb_pool = dma_alloc_coherent(dwc->dev,
209			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
210			&dep->trb_pool_dma, GFP_KERNEL);
211	if (!dep->trb_pool) {
212		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
213				dep->name);
214		return -ENOMEM;
215	}
216
217	return 0;
218}
219
220static void dwc3_free_trb_pool(struct dwc3_ep *dep)
221{
222	struct dwc3		*dwc = dep->dwc;
223
224	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
225			dep->trb_pool, dep->trb_pool_dma);
226
227	dep->trb_pool = NULL;
228	dep->trb_pool_dma = 0;
229}
230
231static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
232{
233	struct dwc3_gadget_ep_cmd_params params;
234	u32			cmd;
235
236	memset(&params, 0x00, sizeof(params));
237
238	if (dep->number != 1) {
239		cmd = DWC3_DEPCMD_DEPSTARTCFG;
240		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
241		if (dep->number > 1)
242			cmd |= DWC3_DEPCMD_PARAM(2);
243
244		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
245	}
246
247	return 0;
248}
249
250static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
251		const struct usb_endpoint_descriptor *desc)
252{
253	struct dwc3_gadget_ep_cmd_params params;
254
255	memset(&params, 0x00, sizeof(params));
256
257	params.param0.depcfg.ep_type = usb_endpoint_type(desc);
258	params.param0.depcfg.max_packet_size = usb_endpoint_maxp(desc);
259
260	params.param1.depcfg.xfer_complete_enable = true;
261	params.param1.depcfg.xfer_not_ready_enable = true;
262
263	if (usb_endpoint_xfer_isoc(desc))
264		params.param1.depcfg.xfer_in_progress_enable = true;
265
266	/*
267	 * We are doing 1:1 mapping for endpoints, meaning
268	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
269	 * so on. We consider the direction bit as part of the physical
270	 * endpoint number. So USB endpoint 0x81 is 0x03.
271	 */
272	params.param1.depcfg.ep_number = dep->number;
273
274	/*
275	 * We must use the lower 16 TX FIFOs even though
276	 * HW might have more
277	 */
278	if (dep->direction)
279		params.param0.depcfg.fifo_number = dep->number >> 1;
280
281	if (desc->bInterval) {
282		params.param1.depcfg.binterval_m1 = desc->bInterval - 1;
283		dep->interval = 1 << (desc->bInterval - 1);
284	}
285
286	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
287			DWC3_DEPCMD_SETEPCONFIG, &params);
288}
289
290static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
291{
292	struct dwc3_gadget_ep_cmd_params params;
293
294	memset(&params, 0x00, sizeof(params));
295
296	params.param0.depxfercfg.number_xfer_resources = 1;
297
298	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
299			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
300}
301
302/**
303 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
304 * @dep: endpoint to be initialized
305 * @desc: USB Endpoint Descriptor
306 *
307 * Caller should take care of locking
308 */
309static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
310		const struct usb_endpoint_descriptor *desc)
311{
312	struct dwc3		*dwc = dep->dwc;
313	u32			reg;
314	int			ret = -ENOMEM;
315
316	if (!(dep->flags & DWC3_EP_ENABLED)) {
317		ret = dwc3_gadget_start_config(dwc, dep);
318		if (ret)
319			return ret;
320	}
321
322	ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
323	if (ret)
324		return ret;
325
326	if (!(dep->flags & DWC3_EP_ENABLED)) {
327		struct dwc3_trb_hw	*trb_st_hw;
328		struct dwc3_trb_hw	*trb_link_hw;
329		struct dwc3_trb		trb_link;
330
331		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
332		if (ret)
333			return ret;
334
335		dep->desc = desc;
336		dep->type = usb_endpoint_type(desc);
337		dep->flags |= DWC3_EP_ENABLED;
338
339		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
340		reg |= DWC3_DALEPENA_EP(dep->number);
341		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
342
343		if (!usb_endpoint_xfer_isoc(desc))
344			return 0;
345
346		memset(&trb_link, 0, sizeof(trb_link));
347
348		/* Link TRB for ISOC. The HWO bit is never reset */
349		trb_st_hw = &dep->trb_pool[0];
350
351		trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
352		trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
353		trb_link.hwo = true;
354
355		trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
356		dwc3_trb_to_hw(&trb_link, trb_link_hw);
357	}
358
359	return 0;
360}
361
362static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
363static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
364{
365	struct dwc3_request		*req;
366
367	if (!list_empty(&dep->req_queued))
368		dwc3_stop_active_transfer(dwc, dep->number);
369
370	while (!list_empty(&dep->request_list)) {
371		req = next_request(&dep->request_list);
372
373		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
374	}
375}
376
377/**
378 * __dwc3_gadget_ep_disable - Disables a HW endpoint
379 * @dep: the endpoint to disable
380 *
381 * This function also removes requests which are currently processed by the
382 * hardware and those which are not yet scheduled.
383 * Caller should take care of locking.
384 */
385static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
386{
387	struct dwc3		*dwc = dep->dwc;
388	u32			reg;
389
390	dep->flags &= ~DWC3_EP_ENABLED;
391	dwc3_remove_requests(dwc, dep);
392
393	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
394	reg &= ~DWC3_DALEPENA_EP(dep->number);
395	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
396
397	dep->desc = NULL;
398	dep->type = 0;
399
400	return 0;
401}
402
403/* -------------------------------------------------------------------------- */
404
405static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
406		const struct usb_endpoint_descriptor *desc)
407{
408	return -EINVAL;
409}
410
411static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
412{
413	return -EINVAL;
414}
415
416/* -------------------------------------------------------------------------- */
417
418static int dwc3_gadget_ep_enable(struct usb_ep *ep,
419		const struct usb_endpoint_descriptor *desc)
420{
421	struct dwc3_ep			*dep;
422	struct dwc3			*dwc;
423	unsigned long			flags;
424	int				ret;
425
426	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
427		pr_debug("dwc3: invalid parameters\n");
428		return -EINVAL;
429	}
430
431	if (!desc->wMaxPacketSize) {
432		pr_debug("dwc3: missing wMaxPacketSize\n");
433		return -EINVAL;
434	}
435
436	dep = to_dwc3_ep(ep);
437	dwc = dep->dwc;
438
439	switch (usb_endpoint_type(desc)) {
440	case USB_ENDPOINT_XFER_CONTROL:
441		strncat(dep->name, "-control", sizeof(dep->name));
442		break;
443	case USB_ENDPOINT_XFER_ISOC:
444		strncat(dep->name, "-isoc", sizeof(dep->name));
445		break;
446	case USB_ENDPOINT_XFER_BULK:
447		strncat(dep->name, "-bulk", sizeof(dep->name));
448		break;
449	case USB_ENDPOINT_XFER_INT:
450		strncat(dep->name, "-int", sizeof(dep->name));
451		break;
452	default:
453		dev_err(dwc->dev, "invalid endpoint transfer type\n");
454	}
455
456	if (dep->flags & DWC3_EP_ENABLED) {
457		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
458				dep->name);
459		return 0;
460	}
461
462	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
463
464	spin_lock_irqsave(&dwc->lock, flags);
465	ret = __dwc3_gadget_ep_enable(dep, desc);
466	spin_unlock_irqrestore(&dwc->lock, flags);
467
468	return ret;
469}
470
471static int dwc3_gadget_ep_disable(struct usb_ep *ep)
472{
473	struct dwc3_ep			*dep;
474	struct dwc3			*dwc;
475	unsigned long			flags;
476	int				ret;
477
478	if (!ep) {
479		pr_debug("dwc3: invalid parameters\n");
480		return -EINVAL;
481	}
482
483	dep = to_dwc3_ep(ep);
484	dwc = dep->dwc;
485
486	if (!(dep->flags & DWC3_EP_ENABLED)) {
487		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
488				dep->name);
489		return 0;
490	}
491
492	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
493			dep->number >> 1,
494			(dep->number & 1) ? "in" : "out");
495
496	spin_lock_irqsave(&dwc->lock, flags);
497	ret = __dwc3_gadget_ep_disable(dep);
498	spin_unlock_irqrestore(&dwc->lock, flags);
499
500	return ret;
501}
502
503static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
504	gfp_t gfp_flags)
505{
506	struct dwc3_request		*req;
507	struct dwc3_ep			*dep = to_dwc3_ep(ep);
508	struct dwc3			*dwc = dep->dwc;
509
510	req = kzalloc(sizeof(*req), gfp_flags);
511	if (!req) {
512		dev_err(dwc->dev, "not enough memory\n");
513		return NULL;
514	}
515
516	req->epnum	= dep->number;
517	req->dep	= dep;
518	req->request.dma = DMA_ADDR_INVALID;
519
520	return &req->request;
521}
522
523static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
524		struct usb_request *request)
525{
526	struct dwc3_request		*req = to_dwc3_request(request);
527
528	kfree(req);
529}
530
531/*
532 * dwc3_prepare_trbs - setup TRBs from requests
533 * @dep: endpoint for which requests are being prepared
534 * @starting: true if the endpoint is idle and no requests are queued.
535 *
536 * The function goes through the request list and sets up TRBs for the
537 * transfers. The function returns once there are no more TRBs available or
538 * it runs out of requests.
539 */
540static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
541		bool starting)
542{
543	struct dwc3_request	*req, *n, *ret = NULL;
544	struct dwc3_trb_hw	*trb_hw;
545	struct dwc3_trb		trb;
546	u32			trbs_left;
547
548	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
549
550	/* the first request must not be queued */
551	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
552	/*
553	 * If the busy and free slots are equal, the ring is either full or
554	 * empty. If we are starting to process requests then it is empty.
555	 * Otherwise it is full and we don't do anything.
556	 */
557	if (!trbs_left) {
558		if (!starting)
559			return NULL;
560		trbs_left = DWC3_TRB_NUM;
561		/*
562		 * In case we start from scratch, we queue the ISOC requests
563		 * starting from slot 1. This is done because we use ring
564		 * buffer and have no LST bit to stop us. Instead, we place
565		 * the IOC bit every TRB_NUM/4. We try to avoid having an
566		 * interrupt after the first request, so we start at slot 1 and
567		 * have 7 requests processed before we hit the first IOC.
568		 * Other transfer types don't use the ring buffer and are
569		 * processed from the first TRB until the last one. Since we
570		 * don't wrap around we have to start at the beginning.
571		 */
572		if (usb_endpoint_xfer_isoc(dep->desc)) {
573			dep->busy_slot = 1;
574			dep->free_slot = 1;
575		} else {
576			dep->busy_slot = 0;
577			dep->free_slot = 0;
578		}
579	}
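	/*
	 * Illustration of the ring arithmetic above (assuming the usual
	 * DWC3_TRB_NUM of 32, so DWC3_TRB_MASK is 31): with busy_slot = 30
	 * and free_slot = 35, (30 - 35) & 31 = 27 TRBs are still available,
	 * and free_slot & DWC3_TRB_MASK = 3 is the next slot to be filled.
	 */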
580
581	/* The last TRB is a link TRB, not used for xfer */
582	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
583		return NULL;
584
585	list_for_each_entry_safe(req, n, &dep->request_list, list) {
586		unsigned int last_one = 0;
587		unsigned int cur_slot;
588
589		trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
590		cur_slot = dep->free_slot;
591		dep->free_slot++;
592
593		/* Skip the LINK-TRB on ISOC */
594		if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
595				usb_endpoint_xfer_isoc(dep->desc))
596			continue;
597
598		dwc3_gadget_move_request_queued(req);
599		memset(&trb, 0, sizeof(trb));
600		trbs_left--;
601
602		/* Is our TRB pool empty? */
603		if (!trbs_left)
604			last_one = 1;
605		/* Is this the last request? */
606		if (list_empty(&dep->request_list))
607			last_one = 1;
608
609		/*
610		 * FIXME we shouldn't need to set LST bit always but we are
611		 * facing some weird problem with the Hardware where it doesn't
612		 * complete even though it has been previously started.
613		 *
614		 * While we're debugging the problem, as a workaround to
615		 * multiple TRBs handling, use only one TRB at a time.
616		 */
617		last_one = 1;
618
619		req->trb = trb_hw;
620		if (!ret)
621			ret = req;
622
623		trb.bplh = req->request.dma;
624
625		if (usb_endpoint_xfer_isoc(dep->desc)) {
626			trb.isp_imi = true;
627			trb.csp = true;
628		} else {
629			trb.lst = last_one;
630		}
631
632		switch (usb_endpoint_type(dep->desc)) {
633		case USB_ENDPOINT_XFER_CONTROL:
634			trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
635			break;
636
637		case USB_ENDPOINT_XFER_ISOC:
638			trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
639
640			/* IOC every DWC3_TRB_NUM / 4 so we can refill */
641			if (!(cur_slot % (DWC3_TRB_NUM / 4)))
642				trb.ioc = last_one;
643			break;
644
645		case USB_ENDPOINT_XFER_BULK:
646		case USB_ENDPOINT_XFER_INT:
647			trb.trbctl = DWC3_TRBCTL_NORMAL;
648			break;
649		default:
650			/*
651			 * This is only possible with faulty memory because we
652			 * checked it already :)
653			 */
654			BUG();
655		}
656
657		trb.length	= req->request.length;
658		trb.hwo = true;
659
660		dwc3_trb_to_hw(&trb, trb_hw);
661		req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
662
663		if (last_one)
664			break;
665	}
666
667	return ret;
668}
669
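/**
 * __dwc3_gadget_kick_transfer - prepare TRBs and (re)start a transfer
 * @dep: the endpoint to kick
 * @cmd_param: command parameter (e.g. the target uFrame for ISOC)
 * @start_new: true to issue Start Transfer, false to issue Update Transfer
 *
 * Caller must hold dwc->lock. If no TRB could be prepared, the endpoint
 * is flagged DWC3_EP_PENDING_REQUEST so the transfer gets kicked later
 * from the XferNotReady interrupt. Returns 0 on success, -EBUSY if a new
 * transfer is requested while one is still active, or the endpoint
 * command's error code.
 */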
670static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
671		int start_new)
672{
673	struct dwc3_gadget_ep_cmd_params params;
674	struct dwc3_request		*req;
675	struct dwc3			*dwc = dep->dwc;
676	int				ret;
677	u32				cmd;
678
679	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
680		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
681		return -EBUSY;
682	}
683	dep->flags &= ~DWC3_EP_PENDING_REQUEST;
684
685	/*
686	 * If we are getting here after a short-out-packet we don't enqueue any
687	 * new requests as we try to set the IOC bit only on the last request.
688	 */
689	if (start_new) {
690		if (list_empty(&dep->req_queued))
691			dwc3_prepare_trbs(dep, start_new);
692
693		/* req points to the first request which will be sent */
694		req = next_request(&dep->req_queued);
695	} else {
696		/*
697		 * req points to the first request where HWO changed
698		 * from 0 to 1
699		 */
700		req = dwc3_prepare_trbs(dep, start_new);
701	}
702	if (!req) {
703		dep->flags |= DWC3_EP_PENDING_REQUEST;
704		return 0;
705	}
706
707	memset(&params, 0, sizeof(params));
708	params.param0.depstrtxfer.transfer_desc_addr_high =
709		upper_32_bits(req->trb_dma);
710	params.param1.depstrtxfer.transfer_desc_addr_low =
711		lower_32_bits(req->trb_dma);
712
713	if (start_new)
714		cmd = DWC3_DEPCMD_STARTTRANSFER;
715	else
716		cmd = DWC3_DEPCMD_UPDATETRANSFER;
717
718	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
719	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
720	if (ret < 0) {
721		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
722
723		/*
724		 * FIXME we need to iterate over the list of requests
725		 * here and stop, unmap, free and del each of the linked
726		 * requests instead of what we do now.
727		 */
728		dwc3_unmap_buffer_from_dma(req);
729		list_del(&req->list);
730		return ret;
731	}
732
733	dep->flags |= DWC3_EP_BUSY;
734	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
735			dep->number);
736	if (!dep->res_trans_idx)
737		printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__);
738	return 0;
739}
740
741static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
742{
743	req->request.actual	= 0;
744	req->request.status	= -EINPROGRESS;
745	req->direction		= dep->direction;
746	req->epnum		= dep->number;
747
748	/*
749	 * We only add to our list of requests now and
750	 * start consuming the list once we get XferNotReady
751	 * IRQ.
752	 *
753	 * That way, we avoid doing anything that we don't need
754	 * to do now and defer it until the point we receive a
755	 * particular token from the Host side.
756	 *
757	 * This will also avoid Host cancelling URBs due to too
758	 * many NAKs.
759	 */
760	dwc3_map_buffer_to_dma(req);
761	list_add_tail(&req->list, &dep->request_list);
762
763	/*
764	 * There is one special case: XferNotReady with
765	 * empty list of requests. We need to kick the
766	 * transfer here in that situation, otherwise
767	 * we will be NAKing forever.
768	 *
769	 * If we get XferNotReady before gadget driver
770	 * has a chance to queue a request, we will ACK
771	 * the IRQ but won't be able to receive the data
772	 * until the next request is queued. The following
773	 * code is handling exactly that.
774	 */
775	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
776		int ret;
777		int start_trans;
778
779		start_trans = 1;
780		if (usb_endpoint_xfer_isoc(dep->desc) &&
781				dep->flags & DWC3_EP_BUSY)
782			start_trans = 0;
783
784		ret =  __dwc3_gadget_kick_transfer(dep, 0, start_trans);
785		if (ret && ret != -EBUSY) {
786			struct dwc3	*dwc = dep->dwc;
787
788			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
789					dep->name);
790		}
791	}
792
793	return 0;
794}
795
796static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
797	gfp_t gfp_flags)
798{
799	struct dwc3_request		*req = to_dwc3_request(request);
800	struct dwc3_ep			*dep = to_dwc3_ep(ep);
801	struct dwc3			*dwc = dep->dwc;
802
803	unsigned long			flags;
804
805	int				ret;
806
807	if (!dep->desc) {
808		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
809				request, ep->name);
810		return -ESHUTDOWN;
811	}
812
813	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
814			request, ep->name, request->length);
815
816	spin_lock_irqsave(&dwc->lock, flags);
817	ret = __dwc3_gadget_ep_queue(dep, req);
818	spin_unlock_irqrestore(&dwc->lock, flags);
819
820	return ret;
821}
822
823static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
824		struct usb_request *request)
825{
826	struct dwc3_request		*req = to_dwc3_request(request);
827	struct dwc3_request		*r = NULL;
828
829	struct dwc3_ep			*dep = to_dwc3_ep(ep);
830	struct dwc3			*dwc = dep->dwc;
831
832	unsigned long			flags;
833	int				ret = 0;
834
835	spin_lock_irqsave(&dwc->lock, flags);
836
837	list_for_each_entry(r, &dep->request_list, list) {
838		if (r == req)
839			break;
840	}
841
842	if (r != req) {
843		list_for_each_entry(r, &dep->req_queued, list) {
844			if (r == req)
845				break;
846		}
847		if (r == req) {
848			/* wait until it is processed */
849			dwc3_stop_active_transfer(dwc, dep->number);
850			goto out0;
851		}
852		dev_err(dwc->dev, "request %p was not queued to %s\n",
853				request, ep->name);
854		ret = -EINVAL;
855		goto out0;
856	}
857
858	/* giveback the request */
859	dwc3_gadget_giveback(dep, req, -ECONNRESET);
860
861out0:
862	spin_unlock_irqrestore(&dwc->lock, flags);
863
864	return ret;
865}
866
867int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
868{
869	struct dwc3_gadget_ep_cmd_params	params;
870	struct dwc3				*dwc = dep->dwc;
871	int					ret;
872
873	memset(&params, 0x00, sizeof(params));
874
875	if (value) {
876		if (dep->number == 0 || dep->number == 1) {
877			/*
878			 * Whenever EP0 is stalled, we will restart
879			 * the state machine, thus moving back to
880			 * Setup Phase
881			 */
882			dwc->ep0state = EP0_SETUP_PHASE;
883		}
884
885		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
886			DWC3_DEPCMD_SETSTALL, &params);
887		if (ret)
888			dev_err(dwc->dev, "failed to %s STALL on %s\n",
889					value ? "set" : "clear",
890					dep->name);
891		else
892			dep->flags |= DWC3_EP_STALL;
893	} else {
894		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
895			DWC3_DEPCMD_CLEARSTALL, &params);
896		if (ret)
897			dev_err(dwc->dev, "failed to %s STALL on %s\n",
898					value ? "set" : "clear",
899					dep->name);
900		else
901			dep->flags &= ~DWC3_EP_STALL;
902	}
903	return ret;
904}
905
906static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
907{
908	struct dwc3_ep			*dep = to_dwc3_ep(ep);
909	struct dwc3			*dwc = dep->dwc;
910
911	unsigned long			flags;
912
913	int				ret;
914
915	spin_lock_irqsave(&dwc->lock, flags);
916
917	if (usb_endpoint_xfer_isoc(dep->desc)) {
918		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
919		ret = -EINVAL;
920		goto out;
921	}
922
923	ret = __dwc3_gadget_ep_set_halt(dep, value);
924out:
925	spin_unlock_irqrestore(&dwc->lock, flags);
926
927	return ret;
928}
929
930static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
931{
932	struct dwc3_ep			*dep = to_dwc3_ep(ep);
933
934	dep->flags |= DWC3_EP_WEDGE;
935
936	return usb_ep_set_halt(ep);
937}
938
939/* -------------------------------------------------------------------------- */
940
941static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
942	.bLength	= USB_DT_ENDPOINT_SIZE,
943	.bDescriptorType = USB_DT_ENDPOINT,
944	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
945};
946
947static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
948	.enable		= dwc3_gadget_ep0_enable,
949	.disable	= dwc3_gadget_ep0_disable,
950	.alloc_request	= dwc3_gadget_ep_alloc_request,
951	.free_request	= dwc3_gadget_ep_free_request,
952	.queue		= dwc3_gadget_ep0_queue,
953	.dequeue	= dwc3_gadget_ep_dequeue,
954	.set_halt	= dwc3_gadget_ep_set_halt,
955	.set_wedge	= dwc3_gadget_ep_set_wedge,
956};
957
958static const struct usb_ep_ops dwc3_gadget_ep_ops = {
959	.enable		= dwc3_gadget_ep_enable,
960	.disable	= dwc3_gadget_ep_disable,
961	.alloc_request	= dwc3_gadget_ep_alloc_request,
962	.free_request	= dwc3_gadget_ep_free_request,
963	.queue		= dwc3_gadget_ep_queue,
964	.dequeue	= dwc3_gadget_ep_dequeue,
965	.set_halt	= dwc3_gadget_ep_set_halt,
966	.set_wedge	= dwc3_gadget_ep_set_wedge,
967};
968
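/*
 * Function drivers never call the above ops directly; they go through the
 * usb_ep_*() wrappers from <linux/usb/gadget.h>. A minimal, illustrative
 * sketch of how a request reaches dwc3_gadget_ep_queue() (hypothetical
 * function-driver code, error handling omitted):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf	= buffer;		// driver-owned buffer
 *	req->length	= len;
 *	req->complete	= my_complete;		// hypothetical callback
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */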
969/* -------------------------------------------------------------------------- */
970
971static int dwc3_gadget_get_frame(struct usb_gadget *g)
972{
973	struct dwc3		*dwc = gadget_to_dwc(g);
974	u32			reg;
975
976	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
977	return DWC3_DSTS_SOFFN(reg);
978}
979
980static int dwc3_gadget_wakeup(struct usb_gadget *g)
981{
982	struct dwc3		*dwc = gadget_to_dwc(g);
983
984	unsigned long		timeout;
985	unsigned long		flags;
986
987	u32			reg;
988
989	int			ret = 0;
990
991	u8			link_state;
992	u8			speed;
993
994	spin_lock_irqsave(&dwc->lock, flags);
995
996	/*
997	 * According to the Databook, the Remote Wakeup request should
998	 * be issued only when the device is in the Early Suspend state.
999	 *
1000	 * We can check that via USB Link State bits in DSTS register.
1001	 */
1002	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1003
1004	speed = reg & DWC3_DSTS_CONNECTSPD;
1005	if (speed == DWC3_DSTS_SUPERSPEED) {
1006		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1007		ret = -EINVAL;
1008		goto out;
1009	}
1010
1011	link_state = DWC3_DSTS_USBLNKST(reg);
1012
1013	switch (link_state) {
1014	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1015	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1016		break;
1017	default:
1018		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1019				link_state);
1020		ret = -EINVAL;
1021		goto out;
1022	}
1023
1024	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1025
1026	/*
1027	 * Switch link state to Recovery. In HS/FS/LS this means
1028	 * RemoteWakeup Request
1029	 */
1030	reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
1031	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1032
1033	/* wait for at least 2000us */
1034	usleep_range(2000, 2500);
1035
1036	/* write zeroes to Link Change Request */
1037	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1038	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1039
1040	/* poll until the Link State changes to ON */
1041	timeout = jiffies + msecs_to_jiffies(100);
1042
1043	while (!(time_after(jiffies, timeout))) {
1044		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1045
1046		/* in HS, means ON */
1047		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1048			break;
1049	}
1050
1051	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1052		dev_err(dwc->dev, "failed to send remote wakeup\n");
1053		ret = -EINVAL;
1054	}
1055
1056out:
1057	spin_unlock_irqrestore(&dwc->lock, flags);
1058
1059	return ret;
1060}
1061
1062static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1063		int is_selfpowered)
1064{
1065	struct dwc3		*dwc = gadget_to_dwc(g);
1066
1067	dwc->is_selfpowered = !!is_selfpowered;
1068
1069	return 0;
1070}
1071
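/**
 * dwc3_gadget_run_stop - set or clear the Run/Stop bit in DCTL
 * @dwc: pointer to our controller context structure
 * @is_on: true for soft-connect, false for soft-disconnect
 *
 * Polls DSTS.DEVCTRLHLT for up to roughly 500us until the controller
 * acknowledges the requested state; on timeout we simply give up and
 * carry on.
 */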
1072static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1073{
1074	u32			reg;
1075	u32			timeout = 500;
1076
1077	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1078	if (is_on)
1079		reg |= DWC3_DCTL_RUN_STOP;
1080	else
1081		reg &= ~DWC3_DCTL_RUN_STOP;
1082
1083	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1084
1085	do {
1086		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1087		if (is_on) {
1088			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1089				break;
1090		} else {
1091			if (reg & DWC3_DSTS_DEVCTRLHLT)
1092				break;
1093		}
1094		timeout--;
1095		if (!timeout)
1096			break;
1097		udelay(1);
1098	} while (1);
1099
1100	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1101			dwc->gadget_driver
1102			? dwc->gadget_driver->function : "no-function",
1103			is_on ? "connect" : "disconnect");
1104}
1105
1106static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1107{
1108	struct dwc3		*dwc = gadget_to_dwc(g);
1109	unsigned long		flags;
1110
1111	is_on = !!is_on;
1112
1113	spin_lock_irqsave(&dwc->lock, flags);
1114	dwc3_gadget_run_stop(dwc, is_on);
1115	spin_unlock_irqrestore(&dwc->lock, flags);
1116
1117	return 0;
1118}
1119
1120static int dwc3_gadget_start(struct usb_gadget *g,
1121		struct usb_gadget_driver *driver)
1122{
1123	struct dwc3		*dwc = gadget_to_dwc(g);
1124	struct dwc3_ep		*dep;
1125	unsigned long		flags;
1126	int			ret = 0;
1127	u32			reg;
1128
1129	spin_lock_irqsave(&dwc->lock, flags);
1130
1131	if (dwc->gadget_driver) {
1132		dev_err(dwc->dev, "%s is already bound to %s\n",
1133				dwc->gadget.name,
1134				dwc->gadget_driver->driver.name);
1135		ret = -EBUSY;
1136		goto err0;
1137	}
1138
1139	dwc->gadget_driver	= driver;
1140	dwc->gadget.dev.driver	= &driver->driver;
1141
1142	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1143
1144	reg &= ~DWC3_GCTL_SCALEDOWN(3);
1145	reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
1146	reg &= ~DWC3_GCTL_DISSCRAMBLE;
1147	reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);
1148
1149	/*
1150	 * WORKAROUND: DWC3 revisions <1.90a have a bug
1151	 * where the device fails to connect at SuperSpeed
1152	 * and falls back to high-speed mode, which causes
1153	 * the device to enter a Connect/Disconnect loop.
1154	 */
1155	if (dwc->revision < DWC3_REVISION_190A)
1156		reg |= DWC3_GCTL_U2RSTECN;
1157
1158	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1159
1160	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1161	reg &= ~(DWC3_DCFG_SPEED_MASK);
1162	reg |= DWC3_DCFG_SUPERSPEED;
1163	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1164
1165	/* Start with SuperSpeed Default */
1166	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1167
1168	dep = dwc->eps[0];
1169	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1170	if (ret) {
1171		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1172		goto err0;
1173	}
1174
1175	dep = dwc->eps[1];
1176	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1177	if (ret) {
1178		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1179		goto err1;
1180	}
1181
1182	/* begin to receive SETUP packets */
1183	dwc->ep0state = EP0_SETUP_PHASE;
1184	dwc3_ep0_out_start(dwc);
1185
1186	spin_unlock_irqrestore(&dwc->lock, flags);
1187
1188	return 0;
1189
1190err1:
1191	__dwc3_gadget_ep_disable(dwc->eps[0]);
1192
1193err0:
1194	spin_unlock_irqrestore(&dwc->lock, flags);
1195
1196	return ret;
1197}
1198
1199static int dwc3_gadget_stop(struct usb_gadget *g,
1200		struct usb_gadget_driver *driver)
1201{
1202	struct dwc3		*dwc = gadget_to_dwc(g);
1203	unsigned long		flags;
1204
1205	spin_lock_irqsave(&dwc->lock, flags);
1206
1207	__dwc3_gadget_ep_disable(dwc->eps[0]);
1208	__dwc3_gadget_ep_disable(dwc->eps[1]);
1209
1210	dwc->gadget_driver	= NULL;
1211	dwc->gadget.dev.driver	= NULL;
1212
1213	spin_unlock_irqrestore(&dwc->lock, flags);
1214
1215	return 0;
1216}
1217static const struct usb_gadget_ops dwc3_gadget_ops = {
1218	.get_frame		= dwc3_gadget_get_frame,
1219	.wakeup			= dwc3_gadget_wakeup,
1220	.set_selfpowered	= dwc3_gadget_set_selfpowered,
1221	.pullup			= dwc3_gadget_pullup,
1222	.udc_start		= dwc3_gadget_start,
1223	.udc_stop		= dwc3_gadget_stop,
1224};
1225
1226/* -------------------------------------------------------------------------- */
1227
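/*
 * Allocate and initialize one struct dwc3_ep per physical endpoint.
 * Physical endpoints 0 and 1 together form ep0 (control, ep0 ops,
 * 512 byte maxpacket); all others get the regular ops, a 1024 byte
 * maxpacket, a TRB pool and an entry in the gadget's ep_list.
 */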
1228static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1229{
1230	struct dwc3_ep			*dep;
1231	u8				epnum;
1232
1233	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1234
1235	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1236		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1237		if (!dep) {
1238			dev_err(dwc->dev, "can't allocate endpoint %d\n",
1239					epnum);
1240			return -ENOMEM;
1241		}
1242
1243		dep->dwc = dwc;
1244		dep->number = epnum;
1245		dwc->eps[epnum] = dep;
1246
1247		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1248				(epnum & 1) ? "in" : "out");
1249		dep->endpoint.name = dep->name;
1250		dep->direction = (epnum & 1);
1251
1252		if (epnum == 0 || epnum == 1) {
1253			dep->endpoint.maxpacket = 512;
1254			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1255			if (!epnum)
1256				dwc->gadget.ep0 = &dep->endpoint;
1257		} else {
1258			int		ret;
1259
1260			dep->endpoint.maxpacket = 1024;
1261			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1262			list_add_tail(&dep->endpoint.ep_list,
1263					&dwc->gadget.ep_list);
1264
1265			ret = dwc3_alloc_trb_pool(dep);
1266			if (ret) {
1267				dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
1268				return ret;
1269			}
1270		}
1271		INIT_LIST_HEAD(&dep->request_list);
1272		INIT_LIST_HEAD(&dep->req_queued);
1273	}
1274
1275	return 0;
1276}
1277
1278static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1279{
1280	struct dwc3_ep			*dep;
1281	u8				epnum;
1282
1283	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1284		dep = dwc->eps[epnum];
1285		dwc3_free_trb_pool(dep);
1286
1287		if (epnum != 0 && epnum != 1)
1288			list_del(&dep->endpoint.ep_list);
1289
1290		kfree(dep);
1291	}
1292}
1293
1294static void dwc3_gadget_release(struct device *dev)
1295{
1296	dev_dbg(dev, "%s\n", __func__);
1297}
1298
1299/* -------------------------------------------------------------------------- */
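/*
 * dwc3_cleanup_done_reqs - give back requests completed by the hardware
 *
 * Walks dep->req_queued, converting each hardware TRB back to CPU layout
 * and handing the request back to the gadget driver. Returns 1 when the
 * endpoint can be marked idle again, 0 when the event only signalled an
 * intermediate IOC completion and the transfer is still in progress.
 */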
1300static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1301		const struct dwc3_event_depevt *event, int status)
1302{
1303	struct dwc3_request	*req;
1304	struct dwc3_trb         trb;
1305	unsigned int		count;
1306	unsigned int		s_pkt = 0;
1307
1308	do {
1309		req = next_request(&dep->req_queued);
1310		if (!req)
1311			break;
1312
1313		dwc3_trb_to_nat(req->trb, &trb);
1314
1315		if (trb.hwo && status != -ESHUTDOWN)
1316			/*
1317			 * We continue despite the error. There is not much we
1318			 * can do. If we don't clean it up we loop forever. If
1319			 * we skip the TRB then it gets overwritten and reused
1320			 * after a while since we use them in a ring buffer. A
1321			 * BUG() would help. Let's hope that if this occurs,
1322			 * someone fixes the root cause instead of looking away :)
1323			 */
1324			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1325					dep->name, req->trb);
1326		count = trb.length;
1327
1328		if (dep->direction) {
1329			if (count) {
1330				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1331						dep->name);
1332				status = -ECONNRESET;
1333			}
1334		} else {
1335			if (count && (event->status & DEPEVT_STATUS_SHORT))
1336				s_pkt = 1;
1337		}
1338
1339		/*
1340		 * We assume here that we will always receive the entire data
1341		 * block we are supposed to receive. Meaning, if we program RX to
1342		 * receive 4K but we receive only 2K, we assume that's all we
1343		 * should receive and we simply bounce the request back to the
1344		 * gadget driver for further processing.
1345		 */
1346		req->request.actual += req->request.length - count;
1347		dwc3_gadget_giveback(dep, req, status);
1348		if (s_pkt)
1349			break;
1350		if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
1351			break;
1352		if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1353			break;
1354	} while (1);
1355
1356	if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1357		return 0;
1358	return 1;
1359}
1360
1361static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1362		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1363		int start_new)
1364{
1365	unsigned		status = 0;
1366	int			clean_busy;
1367
1368	if (event->status & DEPEVT_STATUS_BUSERR)
1369		status = -ECONNRESET;
1370
1371	clean_busy =  dwc3_cleanup_done_reqs(dwc, dep, event, status);
1372	if (clean_busy) {
1373		dep->flags &= ~DWC3_EP_BUSY;
1374		dep->res_trans_idx = 0;
1375	}
1376}
1377
1378static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1379		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1380{
1381	u32 uf;
1382
1383	if (list_empty(&dep->request_list)) {
1384		dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n",
1385			dep->name);
1386		return;
1387	}
1388
1389	if (event->parameters) {
1390		u32 mask;
1391
1392		mask = ~(dep->interval - 1);
1393		uf = event->parameters & mask;
1394		/* 4 micro frames in the future */
1395		uf += dep->interval * 4;
1396	} else {
1397		uf = 0;
1398	}
1399
1400	__dwc3_gadget_kick_transfer(dep, uf, 1);
1401}
1402
1403static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
1404		const struct dwc3_event_depevt *event)
1405{
1406	struct dwc3 *dwc = dep->dwc;
1407	struct dwc3_event_depevt mod_ev = *event;
1408
1409	/*
1410	 * We were asked to remove one request. It is possible that this
1411	 * request and a few others were started together and have the same
1412	 * transfer index. Since we stopped the whole endpoint we don't
1413	 * know how many requests were already completed (but not yet
1414	 * reported) and how many could still be completed later. We purge
1415	 * them all until the end of the list.
1416	 */
1417	mod_ev.status = DEPEVT_STATUS_LST;
1418	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
1419	dep->flags &= ~DWC3_EP_BUSY;
1420	/* pending requests are ignored and are queued on XferNotReady */
1421}
1422
1423static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1424		const struct dwc3_event_depevt *event)
1425{
1426	u32 param = event->parameters;
1427	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1428
1429	switch (cmd_type) {
1430	case DWC3_DEPCMD_ENDTRANSFER:
1431		dwc3_process_ep_cmd_complete(dep, event);
1432		break;
1433	case DWC3_DEPCMD_STARTTRANSFER:
1434		dep->res_trans_idx = param & 0x7f;
1435		break;
1436	default:
1437		printk(KERN_ERR "%s() unknown/unexpected type: %d\n",
1438				__func__, cmd_type);
1439		break;
1440	}
1441}
1442
1443static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1444		const struct dwc3_event_depevt *event)
1445{
1446	struct dwc3_ep		*dep;
1447	u8			epnum = event->endpoint_number;
1448
1449	dep = dwc->eps[epnum];
1450
1451	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1452			dwc3_ep_event_string(event->endpoint_event));
1453
1454	if (epnum == 0 || epnum == 1) {
1455		dwc3_ep0_interrupt(dwc, event);
1456		return;
1457	}
1458
1459	switch (event->endpoint_event) {
1460	case DWC3_DEPEVT_XFERCOMPLETE:
1461		if (usb_endpoint_xfer_isoc(dep->desc)) {
1462			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1463					dep->name);
1464			return;
1465		}
1466
1467		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1468		break;
1469	case DWC3_DEPEVT_XFERINPROGRESS:
1470		if (!usb_endpoint_xfer_isoc(dep->desc)) {
1471			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1472					dep->name);
1473			return;
1474		}
1475
1476		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1477		break;
1478	case DWC3_DEPEVT_XFERNOTREADY:
1479		if (usb_endpoint_xfer_isoc(dep->desc)) {
1480			dwc3_gadget_start_isoc(dwc, dep, event);
1481		} else {
1482			int ret;
1483
1484			dev_vdbg(dwc->dev, "%s: reason %s\n",
1485					dep->name, event->status
1486					? "Transfer Active"
1487					: "Transfer Not Active");
1488
1489			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1490			if (!ret || ret == -EBUSY)
1491				return;
1492
1493			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1494					dep->name);
1495		}
1496
1497		break;
1498	case DWC3_DEPEVT_RXTXFIFOEVT:
1499		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1500		break;
1501	case DWC3_DEPEVT_STREAMEVT:
1502		dev_dbg(dwc->dev, "%s Stream Event\n", dep->name);
1503		break;
1504	case DWC3_DEPEVT_EPCMDCMPLT:
1505		dwc3_ep_cmd_compl(dep, event);
1506		break;
1507	}
1508}
1509
1510static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1511{
1512	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1513		spin_unlock(&dwc->lock);
1514		dwc->gadget_driver->disconnect(&dwc->gadget);
1515		spin_lock(&dwc->lock);
1516	}
1517}
1518
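/*
 * Issue the End Transfer command (with ForceRM and CMDIOC set) for the
 * transfer resource index saved at Start Transfer time. The requests
 * still sitting on the hardware are cleaned up later, when the Endpoint
 * Command Complete event for End Transfer arrives.
 */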
1519static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1520{
1521	struct dwc3_ep *dep;
1522	struct dwc3_gadget_ep_cmd_params params;
1523	u32 cmd;
1524	int ret;
1525
1526	dep = dwc->eps[epnum];
1527
1528	WARN_ON(!dep->res_trans_idx);
1529	if (dep->res_trans_idx) {
1530		cmd = DWC3_DEPCMD_ENDTRANSFER;
1531		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1532		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1533		memset(&params, 0, sizeof(params));
1534		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1535		WARN_ON_ONCE(ret);
1536		dep->res_trans_idx = 0;
1537	}
1538}
1539
1540static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1541{
1542	u32 epnum;
1543
1544	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1545		struct dwc3_ep *dep;
1546
1547		dep = dwc->eps[epnum];
1548		if (!(dep->flags & DWC3_EP_ENABLED))
1549			continue;
1550
1551		dwc3_remove_requests(dwc, dep);
1552	}
1553}
1554
1555static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1556{
1557	u32 epnum;
1558
1559	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1560		struct dwc3_ep *dep;
1561		struct dwc3_gadget_ep_cmd_params params;
1562		int ret;
1563
1564		dep = dwc->eps[epnum];
1565
1566		if (!(dep->flags & DWC3_EP_STALL))
1567			continue;
1568
1569		dep->flags &= ~DWC3_EP_STALL;
1570
1571		memset(&params, 0, sizeof(params));
1572		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1573				DWC3_DEPCMD_CLEARSTALL, &params);
1574		WARN_ON_ONCE(ret);
1575	}
1576}
1577
1578static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1579{
1580	dev_vdbg(dwc->dev, "%s\n", __func__);
1581#if 0
1582	XXX
1583	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
1584	enable it before we can disable it.
1585
1586	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1587	reg &= ~DWC3_DCTL_INITU1ENA;
1588	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1589
1590	reg &= ~DWC3_DCTL_INITU2ENA;
1591	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1592#endif
1593
1594	dwc3_stop_active_transfers(dwc);
1595	dwc3_disconnect_gadget(dwc);
1596
1597	dwc->gadget.speed = USB_SPEED_UNKNOWN;
1598}
1599
1600static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1601{
1602	u32			reg;
1603
1604	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1605
1606	if (on)
1607		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1608	else
1609		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1610
1611	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1612}
1613
1614static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1615{
1616	u32			reg;
1617
1618	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1619
1620	if (on)
1621		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1622	else
1623		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1624
1625	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1626}
1627
1628static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
1629{
1630	u32			reg;
1631
1632	dev_vdbg(dwc->dev, "%s\n", __func__);
1633
1634	/* Enable PHYs */
1635	dwc3_gadget_usb2_phy_power(dwc, true);
1636	dwc3_gadget_usb3_phy_power(dwc, true);
1637
1638	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
1639		dwc3_disconnect_gadget(dwc);
1640
1641	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1642	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
1643	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1644
1645	dwc3_stop_active_transfers(dwc);
1646	dwc3_clear_stall_all_ep(dwc);
1647
1648	/* Reset device address to zero */
1649	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1650	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
1651	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1652
1653	/*
1654	 * Wait for RxFifo to drain
1655	 *
1656	 * REVISIT probably shouldn't wait forever.
1657	 * In case Hardware ends up in a screwed up
1658	 * case, we error out, notify the user and,
1659	 * maybe, WARN() or BUG() but leave the rest
1660	 * of the kernel working fine.
1661	 *
1662	 * REVISIT the below is rather CPU intensive,
1663	 * maybe we should read and if it doesn't work
1664	 * sleep (not busy wait) for a few useconds.
1665	 *
1666	 * REVISIT why wait until the RXFIFO is empty anyway?
1667	 */
1668	while (!(dwc3_readl(dwc->regs, DWC3_DSTS)
1669				& DWC3_DSTS_RXFIFOEMPTY))
1670		cpu_relax();
1671}
1672
1673static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1674{
1675	u32 reg;
1676	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1677
1678	/*
1679	 * We change the clock only at SS but I dunno why I would want to do
1680	 * this. Maybe it becomes part of the power saving plan.
1681	 */
1682
1683	if (speed != DWC3_DSTS_SUPERSPEED)
1684		return;
1685
1686	/*
1687	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
1688	 * each time on Connect Done.
1689	 */
1690	if (!usb30_clock)
1691		return;
1692
1693	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1694	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
1695	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1696}
1697
1698static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
1699{
1700	switch (speed) {
1701	case USB_SPEED_SUPER:
1702		dwc3_gadget_usb2_phy_power(dwc, false);
1703		break;
1704	case USB_SPEED_HIGH:
1705	case USB_SPEED_FULL:
1706	case USB_SPEED_LOW:
1707		dwc3_gadget_usb3_phy_power(dwc, false);
1708		break;
1709	}
1710}
1711
1712static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
1713{
1714	struct dwc3_gadget_ep_cmd_params params;
1715	struct dwc3_ep		*dep;
1716	int			ret;
1717	u32			reg;
1718	u8			speed;
1719
1720	dev_vdbg(dwc->dev, "%s\n", __func__);
1721
1722	memset(&params, 0x00, sizeof(params));
1723
1724	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1725	speed = reg & DWC3_DSTS_CONNECTSPD;
1726	dwc->speed = speed;
1727
1728	dwc3_update_ram_clk_sel(dwc, speed);
1729
1730	switch (speed) {
1731	case DWC3_DCFG_SUPERSPEED:
1732		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1733		dwc->gadget.ep0->maxpacket = 512;
1734		dwc->gadget.speed = USB_SPEED_SUPER;
1735		break;
1736	case DWC3_DCFG_HIGHSPEED:
1737		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
1738		dwc->gadget.ep0->maxpacket = 64;
1739		dwc->gadget.speed = USB_SPEED_HIGH;
1740		break;
1741	case DWC3_DCFG_FULLSPEED2:
1742	case DWC3_DCFG_FULLSPEED1:
1743		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
1744		dwc->gadget.ep0->maxpacket = 64;
1745		dwc->gadget.speed = USB_SPEED_FULL;
1746		break;
1747	case DWC3_DCFG_LOWSPEED:
1748		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
1749		dwc->gadget.ep0->maxpacket = 8;
1750		dwc->gadget.speed = USB_SPEED_LOW;
1751		break;
1752	}
1753
1754	/* Disable the unneeded PHY */
1755	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
1756
1757	dep = dwc->eps[0];
1758	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1759	if (ret) {
1760		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1761		return;
1762	}
1763
1764	dep = dwc->eps[1];
1765	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1766	if (ret) {
1767		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1768		return;
1769	}
1770
1771	/*
1772	 * Configure PHY via GUSB3PIPECTLn if required.
1773	 *
1774	 * Update GTXFIFOSIZn
1775	 *
1776	 * In both cases reset values should be sufficient.
1777	 */
1778}
1779
1780static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
1781{
1782	dev_vdbg(dwc->dev, "%s\n", __func__);
1783
1784	/*
1785	 * TODO take core out of low power mode when that's
1786	 * implemented.
1787	 */
1788
1789	dwc->gadget_driver->resume(&dwc->gadget);
1790}
1791
1792static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
1793		unsigned int evtinfo)
1794{
1795	/* The fifth bit says SuperSpeed yes or no. */
1796	dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;
1797
1798	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
1799}
1800
1801static void dwc3_gadget_interrupt(struct dwc3 *dwc,
1802		const struct dwc3_event_devt *event)
1803{
1804	switch (event->type) {
1805	case DWC3_DEVICE_EVENT_DISCONNECT:
1806		dwc3_gadget_disconnect_interrupt(dwc);
1807		break;
1808	case DWC3_DEVICE_EVENT_RESET:
1809		dwc3_gadget_reset_interrupt(dwc);
1810		break;
1811	case DWC3_DEVICE_EVENT_CONNECT_DONE:
1812		dwc3_gadget_conndone_interrupt(dwc);
1813		break;
1814	case DWC3_DEVICE_EVENT_WAKEUP:
1815		dwc3_gadget_wakeup_interrupt(dwc);
1816		break;
1817	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
1818		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
1819		break;
1820	case DWC3_DEVICE_EVENT_EOPF:
1821		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
1822		break;
1823	case DWC3_DEVICE_EVENT_SOF:
1824		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
1825		break;
1826	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
1827		dev_vdbg(dwc->dev, "Erratic Error\n");
1828		break;
1829	case DWC3_DEVICE_EVENT_CMD_CMPL:
1830		dev_vdbg(dwc->dev, "Command Complete\n");
1831		break;
1832	case DWC3_DEVICE_EVENT_OVERFLOW:
1833		dev_vdbg(dwc->dev, "Overflow\n");
1834		break;
1835	default:
1836		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
1837	}
1838}
1839
1840static void dwc3_process_event_entry(struct dwc3 *dwc,
1841		const union dwc3_event *event)
1842{
1843	/* Endpoint IRQ, handle it and return early */
1844	if (event->type.is_devspec == 0) {
1845		/* depevt */
1846		return dwc3_endpoint_interrupt(dwc, &event->depevt);
1847	}
1848
1849	switch (event->type.type) {
1850	case DWC3_EVENT_TYPE_DEV:
1851		dwc3_gadget_interrupt(dwc, &event->devt);
1852		break;
1853	/* REVISIT what to do with Carkit and I2C events ? */
1854	default:
1855		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
1856	}
1857}
1858
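/**
 * dwc3_process_event_buf - handle all pending events in one event buffer
 * @dwc: pointer to our controller context structure
 * @buf: index of the event buffer to process
 *
 * Reads the number of pending bytes from GEVNTCOUNT, dispatches each
 * 4-byte event entry and acknowledges it by writing 4 back to GEVNTCOUNT.
 * Returns IRQ_HANDLED if at least one event was processed, IRQ_NONE
 * otherwise.
 */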
1859static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
1860{
1861	struct dwc3_event_buffer *evt;
1862	int left;
1863	u32 count;
1864
1865	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
1866	count &= DWC3_GEVNTCOUNT_MASK;
1867	if (!count)
1868		return IRQ_NONE;
1869
1870	evt = dwc->ev_buffs[buf];
1871	left = count;
1872
1873	while (left > 0) {
1874		union dwc3_event event;
1875
1876		memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
1877		dwc3_process_event_entry(dwc, &event);
1878		/*
1879		 * XXX we wrap around correctly to the next entry as almost all
1880		 * entries are 4 bytes in size. There is one entry which has 12
1881		 * bytes which is a regular entry followed by 8 bytes data. ATM
1882		 * I don't know how things are organized if we get next to a
1883		 * boundary, so I'll worry about that once we try to handle it.
1884		 */
1885		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
1886		left -= 4;
1887
1888		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
1889	}
1890
1891	return IRQ_HANDLED;
1892}
1893
1894static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
1895{
1896	struct dwc3			*dwc = _dwc;
1897	int				i;
1898	irqreturn_t			ret = IRQ_NONE;
1899
1900	spin_lock(&dwc->lock);
1901
1902	for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
1903		irqreturn_t status;
1904
1905		status = dwc3_process_event_buf(dwc, i);
1906		if (status == IRQ_HANDLED)
1907			ret = status;
1908	}
1909
1910	spin_unlock(&dwc->lock);
1911
1912	return ret;
1913}
1914
1915/**
1916 * dwc3_gadget_init - Initializes gadget related registers
1917 * @dwc: Pointer to our controller context structure
1918 *
1919 * Returns 0 on success, otherwise negative errno.
1920 */
1921int __devinit dwc3_gadget_init(struct dwc3 *dwc)
1922{
1923	u32					reg;
1924	int					ret;
1925	int					irq;
1926
1927	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
1928			&dwc->ctrl_req_addr, GFP_KERNEL);
1929	if (!dwc->ctrl_req) {
1930		dev_err(dwc->dev, "failed to allocate ctrl request\n");
1931		ret = -ENOMEM;
1932		goto err0;
1933	}
1934
1935	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
1936			&dwc->ep0_trb_addr, GFP_KERNEL);
1937	if (!dwc->ep0_trb) {
1938		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
1939		ret = -ENOMEM;
1940		goto err1;
1941	}
1942
1943	dwc->setup_buf = dma_alloc_coherent(dwc->dev,
1944			sizeof(*dwc->setup_buf) * 2,
1945			&dwc->setup_buf_addr, GFP_KERNEL);
1946	if (!dwc->setup_buf) {
1947		dev_err(dwc->dev, "failed to allocate setup buffer\n");
1948		ret = -ENOMEM;
1949		goto err2;
1950	}
1951
1952	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
1953			512, &dwc->ep0_bounce_addr, GFP_KERNEL);
1954	if (!dwc->ep0_bounce) {
1955		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
1956		ret = -ENOMEM;
1957		goto err3;
1958	}
1959
1960	dev_set_name(&dwc->gadget.dev, "gadget");
1961
1962	dwc->gadget.ops			= &dwc3_gadget_ops;
1963	dwc->gadget.is_dualspeed	= true;
1964	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
1965	dwc->gadget.dev.parent		= dwc->dev;
1966
1967	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
1968
1969	dwc->gadget.dev.dma_parms	= dwc->dev->dma_parms;
1970	dwc->gadget.dev.dma_mask	= dwc->dev->dma_mask;
1971	dwc->gadget.dev.release		= dwc3_gadget_release;
1972	dwc->gadget.name		= "dwc3-gadget";
1973
1974	/*
1975	 * REVISIT: Here we should clear all pending IRQs to be
1976	 * sure we're starting from a well known location.
1977	 */
1978
1979	ret = dwc3_gadget_init_endpoints(dwc);
1980	if (ret)
1981		goto err4;
1982
1983	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1984
1985	ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
1986			"dwc3", dwc);
1987	if (ret) {
1988		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1989				irq, ret);
1990		goto err5;
1991	}
1992
1993	/* Enable all but Start and End of Frame IRQs */
1994	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1995			DWC3_DEVTEN_EVNTOVERFLOWEN |
1996			DWC3_DEVTEN_CMDCMPLTEN |
1997			DWC3_DEVTEN_ERRTICERREN |
1998			DWC3_DEVTEN_WKUPEVTEN |
1999			DWC3_DEVTEN_ULSTCNGEN |
2000			DWC3_DEVTEN_CONNECTDONEEN |
2001			DWC3_DEVTEN_USBRSTEN |
2002			DWC3_DEVTEN_DISCONNEVTEN);
2003	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2004
2005	ret = device_register(&dwc->gadget.dev);
2006	if (ret) {
2007		dev_err(dwc->dev, "failed to register gadget device\n");
2008		put_device(&dwc->gadget.dev);
2009		goto err6;
2010	}
2011
2012	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2013	if (ret) {
2014		dev_err(dwc->dev, "failed to register udc\n");
2015		goto err7;
2016	}
2017
2018	return 0;
2019
2020err7:
2021	device_unregister(&dwc->gadget.dev);
2022
2023err6:
2024	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2025	free_irq(irq, dwc);
2026
2027err5:
2028	dwc3_gadget_free_endpoints(dwc);
2029
2030err4:
2031	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2032			dwc->ep0_bounce_addr);
2033
2034err3:
2035	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2036			dwc->setup_buf, dwc->setup_buf_addr);
2037
2038err2:
2039	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2040			dwc->ep0_trb, dwc->ep0_trb_addr);
2041
2042err1:
2043	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2044			dwc->ctrl_req, dwc->ctrl_req_addr);
2045
2046err0:
2047	return ret;
2048}
2049
2050void dwc3_gadget_exit(struct dwc3 *dwc)
2051{
2052	int			irq;
2053	int			i;
2054
2055	usb_del_gadget_udc(&dwc->gadget);
2056	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2057
2058	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2059	free_irq(irq, dwc);
2060
2061	for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
2062		__dwc3_gadget_ep_disable(dwc->eps[i]);
2063
2064	dwc3_gadget_free_endpoints(dwc);
2065
2066	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2067			dwc->ep0_bounce_addr);
2068
2069	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2070			dwc->setup_buf, dwc->setup_buf_addr);
2071
2072	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2073			dwc->ep0_trb, dwc->ep0_trb_addr);
2074
2075	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2076			dwc->ctrl_req, dwc->ctrl_req_addr);
2077
2078	device_unregister(&dwc->gadget.dev);
2079}
2080