gadget.c revision e5caff6831d00d96b4618de939312570527ad54a
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 *    to endorse or promote products derived from this software without
20 *    specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <linux/kernel.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/interrupt.h>
46#include <linux/io.h>
47#include <linux/list.h>
48#include <linux/dma-mapping.h>
49
50#include <linux/usb/ch9.h>
51#include <linux/usb/gadget.h>
52
53#include "core.h"
54#include "gadget.h"
55#include "io.h"
56
57/**
58 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
59 * @dwc: pointer to our context structure
60 * @mode: the mode to set (J, K, SE0 NAK, Test Packet, Force Enable)
61 *
62 * Caller should take care of locking. This function will
63 * return 0 on success or -EINVAL if an invalid Test Selector
64 * is passed
65 */
66int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
67{
68	u32		reg;
69
70	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
71	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
72
73	switch (mode) {
74	case TEST_J:
75	case TEST_K:
76	case TEST_SE0_NAK:
77	case TEST_PACKET:
78	case TEST_FORCE_EN:
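		/*
		 * TEST_J ... TEST_FORCE_EN are the ch9 test mode selectors
		 * (values 1..5); shifting them left by one bit places them
		 * in the DCTL test control field that was cleared via
		 * DWC3_DCTL_TSTCTRL_MASK above.
		 */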
79		reg |= mode << 1;
80		break;
81	default:
82		return -EINVAL;
83	}
84
85	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
86
87	return 0;
88}
89
90/**
91 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
92 * @dwc: pointer to our context structure
93 * @state: the state to put link into
94 *
95 * Caller should take care of locking. This function will
96 * return 0 on success or -ETIMEDOUT.
97 */
98int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
99{
100	int		retries = 10000;
101	u32		reg;
102
103	/*
104	 * Wait until device controller is ready. Only applies to 1.94a and
105	 * later RTL.
106	 */
107	if (dwc->revision >= DWC3_REVISION_194A) {
108		while (--retries) {
109			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
110			if (reg & DWC3_DSTS_DCNRD)
111				udelay(5);
112			else
113				break;
114		}
115
116		if (retries <= 0)
117			return -ETIMEDOUT;
118	}
119
120	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
121	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
122
123	/* set requested state */
124	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
125	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
126
127	/*
128	 * The following code is racy when called from dwc3_gadget_wakeup,
129	 * and is not needed, at least on newer versions
130	 */
131	if (dwc->revision >= DWC3_REVISION_194A)
132		return 0;
133
134	/* wait for a change in DSTS */
135	retries = 10000;
136	while (--retries) {
137		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
138
139		if (DWC3_DSTS_USBLNKST(reg) == state)
140			return 0;
141
142		udelay(5);
143	}
144
145	dev_vdbg(dwc->dev, "link state change request timed out\n");
146
147	return -ETIMEDOUT;
148}
149
150/**
151 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
152 * @dwc: pointer to our context structure
153 *
154 * This function will do a best effort FIFO allocation in order
155 * to improve FIFO usage and throughput, while still allowing
156 * us to enable as many endpoints as possible.
157 *
158 * Keep in mind that this operation will be highly dependent
159 * on the configured size for RAM1 - which contains TxFifo -,
160 * the number of endpoints enabled in the coreConsultant tool, and
161 * the width of the Master Bus.
162 *
163 * In the ideal world, we would always be able to satisfy the
164 * following equation:
165 *
166 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
167 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
168 *
169 * Unfortunately, due to many variables, that's not always the case.
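 *
 * As a purely illustrative example of what the loop below programs:
 * with a 64-bit (8 byte) master bus, a single enabled bulk IN endpoint
 * with a 512 byte wMaxPacketSize ends up with
 * DIV_ROUND_UP(3 * (512 + 8) + 8, 8) = 196 MDWIDTH-words of TxFifo.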
170 */
171int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
172{
173	int		last_fifo_depth = 0;
174	int		ram1_depth;
175	int		fifo_size;
176	int		mdwidth;
177	int		num;
178
179	if (!dwc->needs_fifo_resize)
180		return 0;
181
182	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
183	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
184
185	/* MDWIDTH is represented in bits, we need it in bytes */
186	mdwidth >>= 3;
187
188	/*
189	 * FIXME For now we will only allocate 1 wMaxPacketSize space
190	 * for each enabled endpoint, later patches will come to
191	 * improve this algorithm so that we better use the internal
192	 * FIFO space
193	 */
194	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
195		struct dwc3_ep	*dep = dwc->eps[num];
196		int		fifo_number = dep->number >> 1;
197		int		mult = 1;
198		int		tmp;
199
200		if (!(dep->number & 1))
201			continue;
202
203		if (!(dep->flags & DWC3_EP_ENABLED))
204			continue;
205
206		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
207				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
208			mult = 3;
209
210		/*
211		 * REVISIT: the following assumes we will always have enough
212		 * space available on the FIFO RAM for all possible use cases.
213		 * Make sure that's true somehow and change FIFO allocation
214		 * accordingly.
215		 *
216		 * If we have Bulk or Isochronous endpoints, we want
217		 * them to be able to be very, very fast. So we're giving
218		 * those endpoints a fifo_size which is enough for 3 full
219		 * packets
220		 */
221		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
222		tmp += mdwidth;
223
224		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
225
226		fifo_size |= (last_fifo_depth << 16);
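		/*
		 * GTXFIFOSIZ holds the FIFO depth (in MDWIDTH words) in its
		 * lower half and the FIFO start address in its upper half;
		 * last_fifo_depth doubles as the running start address.
		 */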
227
228		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
229				dep->name, last_fifo_depth, fifo_size & 0xffff);
230
231		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
232				fifo_size);
233
234		last_fifo_depth += (fifo_size & 0xffff);
235	}
236
237	return 0;
238}
239
240void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
241		int status)
242{
243	struct dwc3			*dwc = dep->dwc;
244	int				i;
245
246	if (req->queued) {
247		i = 0;
248		do {
249			dep->busy_slot++;
250			/*
251			 * Skip LINK TRB. We can't use req->trb and check for
252			 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
253			 * just completed (not the LINK TRB).
254			 */
255			if (((dep->busy_slot & DWC3_TRB_MASK) ==
256				DWC3_TRB_NUM - 1) &&
257				usb_endpoint_xfer_isoc(dep->endpoint.desc))
258				dep->busy_slot++;
259		} while (++i < req->request.num_mapped_sgs);
260		req->queued = false;
261	}
262	list_del(&req->list);
263	req->trb = NULL;
264
265	if (req->request.status == -EINPROGRESS)
266		req->request.status = status;
267
268	if (dwc->ep0_bounced && dep->number == 0)
269		dwc->ep0_bounced = false;
270	else
271		usb_gadget_unmap_request(&dwc->gadget, &req->request,
272				req->direction);
273
274	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
275			req, dep->name, req->request.actual,
276			req->request.length, status);
277
278	spin_unlock(&dwc->lock);
279	req->request.complete(&dep->endpoint, &req->request);
280	spin_lock(&dwc->lock);
281}
282
283static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
284{
285	switch (cmd) {
286	case DWC3_DEPCMD_DEPSTARTCFG:
287		return "Start New Configuration";
288	case DWC3_DEPCMD_ENDTRANSFER:
289		return "End Transfer";
290	case DWC3_DEPCMD_UPDATETRANSFER:
291		return "Update Transfer";
292	case DWC3_DEPCMD_STARTTRANSFER:
293		return "Start Transfer";
294	case DWC3_DEPCMD_CLEARSTALL:
295		return "Clear Stall";
296	case DWC3_DEPCMD_SETSTALL:
297		return "Set Stall";
298	case DWC3_DEPCMD_GETEPSTATE:
299		return "Get Endpoint State";
300	case DWC3_DEPCMD_SETTRANSFRESOURCE:
301		return "Set Endpoint Transfer Resource";
302	case DWC3_DEPCMD_SETEPCONFIG:
303		return "Set Endpoint Configuration";
304	default:
305		return "UNKNOWN command";
306	}
307}
308
309int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
310{
311	u32		timeout = 500;
312	u32		reg;
313
314	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
315	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
316
317	do {
318		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
319		if (!(reg & DWC3_DGCMD_CMDACT)) {
320			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
321					DWC3_DGCMD_STATUS(reg));
322			return 0;
323		}
324
325		/*
326		 * We can't sleep here, because it's also called from
327		 * interrupt context.
328		 */
329		timeout--;
330		if (!timeout)
331			return -ETIMEDOUT;
332		udelay(1);
333	} while (1);
334}
335
336int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
337		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
338{
339	struct dwc3_ep		*dep = dwc->eps[ep];
340	u32			timeout = 500;
341	u32			reg;
342
343	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
344			dep->name,
345			dwc3_gadget_ep_cmd_string(cmd), params->param0,
346			params->param1, params->param2);
347
348	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
349	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
350	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
351
352	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
353	do {
354		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
355		if (!(reg & DWC3_DEPCMD_CMDACT)) {
356			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
357					DWC3_DEPCMD_STATUS(reg));
358			return 0;
359		}
360
361		/*
362		 * We can't sleep here, because it is also called from
363		 * interrupt context.
364		 */
365		timeout--;
366		if (!timeout)
367			return -ETIMEDOUT;
368
369		udelay(1);
370	} while (1);
371}
372
373static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
374		struct dwc3_trb *trb)
375{
376	u32		offset = (char *) trb - (char *) dep->trb_pool;
377
378	return dep->trb_pool_dma + offset;
379}
380
381static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
382{
383	struct dwc3		*dwc = dep->dwc;
384
385	if (dep->trb_pool)
386		return 0;
387
388	if (dep->number == 0 || dep->number == 1)
389		return 0;
390
391	dep->trb_pool = dma_alloc_coherent(dwc->dev,
392			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
393			&dep->trb_pool_dma, GFP_KERNEL);
394	if (!dep->trb_pool) {
395		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
396				dep->name);
397		return -ENOMEM;
398	}
399
400	return 0;
401}
402
403static void dwc3_free_trb_pool(struct dwc3_ep *dep)
404{
405	struct dwc3		*dwc = dep->dwc;
406
407	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
408			dep->trb_pool, dep->trb_pool_dma);
409
410	dep->trb_pool = NULL;
411	dep->trb_pool_dma = 0;
412}
413
414static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
415{
416	struct dwc3_gadget_ep_cmd_params params;
417	u32			cmd;
418
419	memset(&params, 0x00, sizeof(params));
420
421	if (dep->number != 1) {
422		cmd = DWC3_DEPCMD_DEPSTARTCFG;
423		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
424		if (dep->number > 1) {
425			if (dwc->start_config_issued)
426				return 0;
427			dwc->start_config_issued = true;
428			cmd |= DWC3_DEPCMD_PARAM(2);
429		}
430
431		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
432	}
433
434	return 0;
435}
436
437static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
438		const struct usb_endpoint_descriptor *desc,
439		const struct usb_ss_ep_comp_descriptor *comp_desc,
440		bool ignore)
441{
442	struct dwc3_gadget_ep_cmd_params params;
443
444	memset(&params, 0x00, sizeof(params));
445
446	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
447		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
448
449	/* Burst size is only needed in SuperSpeed mode */
450	if (dwc->gadget.speed == USB_SPEED_SUPER) {
451		u32 burst = dep->endpoint.maxburst - 1;
452
453		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
454	}
455
456	if (ignore)
457		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
458
459	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
460		| DWC3_DEPCFG_XFER_NOT_READY_EN;
461
462	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
463		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
464			| DWC3_DEPCFG_STREAM_EVENT_EN;
465		dep->stream_capable = true;
466	}
467
468	if (usb_endpoint_xfer_isoc(desc))
469		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
470
471	/*
472	 * We are doing 1:1 mapping for endpoints, meaning
473	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
474	 * so on. We consider the direction bit as part of the physical
475	 * endpoint number. So USB endpoint 0x81 is 0x03.
476	 */
477	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
478
479	/*
480	 * We must use the lower 16 TX FIFOs even though
481	 * HW might have more
482	 */
483	if (dep->direction)
484		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
485
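	/*
	 * Cache the interval as 2^(bInterval - 1) (micro)frames, which
	 * matches the high-/super-speed encoding of bInterval for
	 * periodic endpoints; e.g. bInterval = 4 yields dep->interval = 8.
	 */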
486	if (desc->bInterval) {
487		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
488		dep->interval = 1 << (desc->bInterval - 1);
489	}
490
491	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
492			DWC3_DEPCMD_SETEPCONFIG, &params);
493}
494
495static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
496{
497	struct dwc3_gadget_ep_cmd_params params;
498
499	memset(&params, 0x00, sizeof(params));
500
501	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
502
503	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
504			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
505}
506
507/**
508 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
509 * @dep: endpoint to be initialized
510 * @desc: USB Endpoint Descriptor
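 * @comp_desc: SuperSpeed endpoint companion descriptor, if any
 * @ignore: when true, DWC3_DEPCFG_IGN_SEQ_NUM is set in the endpoint config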
511 *
512 * Caller should take care of locking
513 */
514static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
515		const struct usb_endpoint_descriptor *desc,
516		const struct usb_ss_ep_comp_descriptor *comp_desc,
517		bool ignore)
518{
519	struct dwc3		*dwc = dep->dwc;
520	u32			reg;
521	int			ret = -ENOMEM;
522
523	if (!(dep->flags & DWC3_EP_ENABLED)) {
524		ret = dwc3_gadget_start_config(dwc, dep);
525		if (ret)
526			return ret;
527	}
528
529	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
530	if (ret)
531		return ret;
532
533	if (!(dep->flags & DWC3_EP_ENABLED)) {
534		struct dwc3_trb	*trb_st_hw;
535		struct dwc3_trb	*trb_link;
536
537		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
538		if (ret)
539			return ret;
540
541		dep->endpoint.desc = desc;
542		dep->comp_desc = comp_desc;
543		dep->type = usb_endpoint_type(desc);
544		dep->flags |= DWC3_EP_ENABLED;
545
546		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
547		reg |= DWC3_DALEPENA_EP(dep->number);
548		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
549
550		if (!usb_endpoint_xfer_isoc(desc))
551			return 0;
552
553		/* Link TRB for ISOC. The HWO bit is never reset */
554		trb_st_hw = &dep->trb_pool[0];
555
556		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
557		memset(trb_link, 0, sizeof(*trb_link));
558
560		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
561		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
562		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
563		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
564	}
565
566	return 0;
567}
568
569static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
570static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
571{
572	struct dwc3_request		*req;
573
574	if (!list_empty(&dep->req_queued)) {
575		dwc3_stop_active_transfer(dwc, dep->number);
576
577		/* giveback all requests to the gadget driver */
578		while (!list_empty(&dep->req_queued)) {
579			req = next_request(&dep->req_queued);
580
581			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
582		}
583	}
584
585	while (!list_empty(&dep->request_list)) {
586		req = next_request(&dep->request_list);
587
588		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
589	}
590}
591
592/**
593 * __dwc3_gadget_ep_disable - Disables a HW endpoint
594 * @dep: the endpoint to disable
595 *
596 * This function also removes requests which are currently processed by the
597 * hardware and those which are not yet scheduled.
598 * Caller should take care of locking.
599 */
600static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
601{
602	struct dwc3		*dwc = dep->dwc;
603	u32			reg;
604
605	dwc3_remove_requests(dwc, dep);
606
607	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
608	reg &= ~DWC3_DALEPENA_EP(dep->number);
609	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
610
611	dep->stream_capable = false;
612	dep->endpoint.desc = NULL;
613	dep->comp_desc = NULL;
614	dep->type = 0;
615	dep->flags = 0;
616
617	return 0;
618}
619
620/* -------------------------------------------------------------------------- */
621
622static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
623		const struct usb_endpoint_descriptor *desc)
624{
625	return -EINVAL;
626}
627
628static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
629{
630	return -EINVAL;
631}
632
633/* -------------------------------------------------------------------------- */
634
635static int dwc3_gadget_ep_enable(struct usb_ep *ep,
636		const struct usb_endpoint_descriptor *desc)
637{
638	struct dwc3_ep			*dep;
639	struct dwc3			*dwc;
640	unsigned long			flags;
641	int				ret;
642
643	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
644		pr_debug("dwc3: invalid parameters\n");
645		return -EINVAL;
646	}
647
648	if (!desc->wMaxPacketSize) {
649		pr_debug("dwc3: missing wMaxPacketSize\n");
650		return -EINVAL;
651	}
652
653	dep = to_dwc3_ep(ep);
654	dwc = dep->dwc;
655
656	if (dep->flags & DWC3_EP_ENABLED) {
657		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
658				dep->name);
659		return 0;
660	}
661
662	switch (usb_endpoint_type(desc)) {
663	case USB_ENDPOINT_XFER_CONTROL:
664		strlcat(dep->name, "-control", sizeof(dep->name));
665		break;
666	case USB_ENDPOINT_XFER_ISOC:
667		strlcat(dep->name, "-isoc", sizeof(dep->name));
668		break;
669	case USB_ENDPOINT_XFER_BULK:
670		strlcat(dep->name, "-bulk", sizeof(dep->name));
671		break;
672	case USB_ENDPOINT_XFER_INT:
673		strlcat(dep->name, "-int", sizeof(dep->name));
674		break;
675	default:
676		dev_err(dwc->dev, "invalid endpoint transfer type\n");
677	}
678
679	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
680
681	spin_lock_irqsave(&dwc->lock, flags);
682	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
683	spin_unlock_irqrestore(&dwc->lock, flags);
684
685	return ret;
686}
687
688static int dwc3_gadget_ep_disable(struct usb_ep *ep)
689{
690	struct dwc3_ep			*dep;
691	struct dwc3			*dwc;
692	unsigned long			flags;
693	int				ret;
694
695	if (!ep) {
696		pr_debug("dwc3: invalid parameters\n");
697		return -EINVAL;
698	}
699
700	dep = to_dwc3_ep(ep);
701	dwc = dep->dwc;
702
703	if (!(dep->flags & DWC3_EP_ENABLED)) {
704		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
705				dep->name);
706		return 0;
707	}
708
709	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
710			dep->number >> 1,
711			(dep->number & 1) ? "in" : "out");
712
713	spin_lock_irqsave(&dwc->lock, flags);
714	ret = __dwc3_gadget_ep_disable(dep);
715	spin_unlock_irqrestore(&dwc->lock, flags);
716
717	return ret;
718}
719
720static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
721	gfp_t gfp_flags)
722{
723	struct dwc3_request		*req;
724	struct dwc3_ep			*dep = to_dwc3_ep(ep);
725	struct dwc3			*dwc = dep->dwc;
726
727	req = kzalloc(sizeof(*req), gfp_flags);
728	if (!req) {
729		dev_err(dwc->dev, "not enough memory\n");
730		return NULL;
731	}
732
733	req->epnum	= dep->number;
734	req->dep	= dep;
735
736	return &req->request;
737}
738
739static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
740		struct usb_request *request)
741{
742	struct dwc3_request		*req = to_dwc3_request(request);
743
744	kfree(req);
745}
746
747/**
748 * dwc3_prepare_one_trb - setup one TRB from one request
749 * @dep: endpoint for which this request is prepared
750 * @req: dwc3_request pointer
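 * @dma: DMA address of the buffer this TRB points at
 * @length: number of bytes to transfer with this TRB
 * @last: true for the last TRB of the transfer
 * @chain: true if more TRBs follow for the same request
 * @node: index within the request's scatterlist (0 selects ISOCHRONOUS_FIRST)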
751 */
752static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
753		struct dwc3_request *req, dma_addr_t dma,
754		unsigned length, unsigned last, unsigned chain, unsigned node)
755{
756	struct dwc3		*dwc = dep->dwc;
757	struct dwc3_trb		*trb;
758
759	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
760			dep->name, req, (unsigned long long) dma,
761			length, last ? " last" : "",
762			chain ? " chain" : "");
763
764	/* Skip the LINK-TRB on ISOC */
765	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
766			usb_endpoint_xfer_isoc(dep->endpoint.desc))
767		dep->free_slot++;
768
769	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
770
771	if (!req->trb) {
772		dwc3_gadget_move_request_queued(req);
773		req->trb = trb;
774		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
775		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
776	}
777
778	dep->free_slot++;
779
780	trb->size = DWC3_TRB_SIZE_LENGTH(length);
781	trb->bpl = lower_32_bits(dma);
782	trb->bph = upper_32_bits(dma);
783
784	switch (usb_endpoint_type(dep->endpoint.desc)) {
785	case USB_ENDPOINT_XFER_CONTROL:
786		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
787		break;
788
789	case USB_ENDPOINT_XFER_ISOC:
790		if (!node)
791			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
792		else
793			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
794
795		if (!req->request.no_interrupt && !chain)
796			trb->ctrl |= DWC3_TRB_CTRL_IOC;
797		break;
798
799	case USB_ENDPOINT_XFER_BULK:
800	case USB_ENDPOINT_XFER_INT:
801		trb->ctrl = DWC3_TRBCTL_NORMAL;
802		break;
803	default:
804		/*
805		 * This is only possible with faulty memory because we
806		 * checked it already :)
807		 */
808		BUG();
809	}
810
811	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
812		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
813		trb->ctrl |= DWC3_TRB_CTRL_CSP;
814	} else if (last) {
815		trb->ctrl |= DWC3_TRB_CTRL_LST;
816	}
817
818	if (chain)
819		trb->ctrl |= DWC3_TRB_CTRL_CHN;
820
821	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
822		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
823
824	trb->ctrl |= DWC3_TRB_CTRL_HWO;
825}
826
827/*
828 * dwc3_prepare_trbs - setup TRBs from requests
829 * @dep: endpoint for which requests are being prepared
830 * @starting: true if the endpoint is idle and no requests are queued.
831 *
832 * The function goes through the requests list and sets up TRBs for the
833 * transfers. It returns once there are no more TRBs available or
834 * it runs out of requests.
835 */
836static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
837{
838	struct dwc3_request	*req, *n;
839	u32			trbs_left;
840	u32			max;
841	unsigned int		last_one = 0;
842
843	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
844
845	/* the first request must not be queued */
846	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
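	/*
	 * busy_slot and free_slot are free-running indices into the TRB
	 * ring, so the masked difference is the number of TRBs we may
	 * still fill; e.g. with a ring of 32 TRBs, busy_slot = 2 and
	 * free_slot = 5 leaves (2 - 5) & 31 = 29 usable slots.
	 */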
847
848	/* Can't wrap around on a non-isoc EP since there's no link TRB */
849	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
850		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
851		if (trbs_left > max)
852			trbs_left = max;
853	}
854
855	/*
856	 * If the busy and free slots are equal, the ring is either full or
857	 * empty. If we are starting to process requests then we are empty.
858	 * Otherwise we are full and don't do anything.
859	 */
860	if (!trbs_left) {
861		if (!starting)
862			return;
863		trbs_left = DWC3_TRB_NUM;
864		/*
865		 * In case we start from scratch, we queue the ISOC requests
866		 * starting from slot 1. This is done because we use ring
867		 * buffer and have no LST bit to stop us. Instead, we place
868		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
869		 * after the first request so we start at slot 1 and have
870		 * 7 requests proceed before we hit the first IOC.
871		 * Other transfer types don't use the ring buffer and are
872		 * processed from the first TRB until the last one. Since we
873		 * don't wrap around we have to start at the beginning.
874		 */
875		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
876			dep->busy_slot = 1;
877			dep->free_slot = 1;
878		} else {
879			dep->busy_slot = 0;
880			dep->free_slot = 0;
881		}
882	}
883
884	/* The last TRB is a link TRB, not used for xfer */
885	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
886		return;
887
888	list_for_each_entry_safe(req, n, &dep->request_list, list) {
889		unsigned	length;
890		dma_addr_t	dma;
891		last_one = false;
892
893		if (req->request.num_mapped_sgs > 0) {
894			struct usb_request *request = &req->request;
895			struct scatterlist *sg = request->sg;
896			struct scatterlist *s;
897			int		i;
898
899			for_each_sg(sg, s, request->num_mapped_sgs, i) {
900				unsigned chain = true;
901
902				length = sg_dma_len(s);
903				dma = sg_dma_address(s);
904
905				if (i == (request->num_mapped_sgs - 1) ||
906						sg_is_last(s)) {
907					if (list_is_last(&req->list,
908							&dep->request_list))
909						last_one = true;
910					chain = false;
911				}
912
913				trbs_left--;
914				if (!trbs_left)
915					last_one = true;
916
917				if (last_one)
918					chain = false;
919
920				dwc3_prepare_one_trb(dep, req, dma, length,
921						last_one, chain, i);
922
923				if (last_one)
924					break;
925			}
926		} else {
927			dma = req->request.dma;
928			length = req->request.length;
929			trbs_left--;
930
931			if (!trbs_left)
932				last_one = 1;
933
934			/* Is this the last request? */
935			if (list_is_last(&req->list, &dep->request_list))
936				last_one = 1;
937
938			dwc3_prepare_one_trb(dep, req, dma, length,
939					last_one, false, 0);
940
941			if (last_one)
942				break;
943		}
944	}
945}
946
947static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
948		int start_new)
949{
950	struct dwc3_gadget_ep_cmd_params params;
951	struct dwc3_request		*req;
952	struct dwc3			*dwc = dep->dwc;
953	int				ret;
954	u32				cmd;
955
956	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
957		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
958		return -EBUSY;
959	}
960	dep->flags &= ~DWC3_EP_PENDING_REQUEST;
961
962	/*
963	 * If we are getting here after a short-out-packet we don't enqueue any
964	 * new requests as we try to set the IOC bit only on the last request.
965	 */
966	if (start_new) {
967		if (list_empty(&dep->req_queued))
968			dwc3_prepare_trbs(dep, start_new);
969
970		/* req points to the first request which will be sent */
971		req = next_request(&dep->req_queued);
972	} else {
973		dwc3_prepare_trbs(dep, start_new);
974
975		/*
976		 * req points to the first request where HWO changed from 0 to 1
977		 */
978		req = next_request(&dep->req_queued);
979	}
980	if (!req) {
981		dep->flags |= DWC3_EP_PENDING_REQUEST;
982		return 0;
983	}
984
985	memset(&params, 0, sizeof(params));
986
987	if (start_new) {
988		params.param0 = upper_32_bits(req->trb_dma);
989		params.param1 = lower_32_bits(req->trb_dma);
990		cmd = DWC3_DEPCMD_STARTTRANSFER;
991	} else {
992		cmd = DWC3_DEPCMD_UPDATETRANSFER;
993	}
994
995	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
996	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
997	if (ret < 0) {
998		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
999
1000		/*
1001		 * FIXME we need to iterate over the list of requests
1002		 * here and stop, unmap, free and del each of the linked
1003		 * requests instead of what we do now.
1004		 */
1005		usb_gadget_unmap_request(&dwc->gadget, &req->request,
1006				req->direction);
1007		list_del(&req->list);
1008		return ret;
1009	}
1010
1011	dep->flags |= DWC3_EP_BUSY;
1012
1013	if (start_new) {
1014		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
1015				dep->number);
1016		WARN_ON_ONCE(!dep->resource_index);
1017	}
1018
1019	return 0;
1020}
1021
1022static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1023		struct dwc3_ep *dep, u32 cur_uf)
1024{
1025	u32 uf;
1026
1027	if (list_empty(&dep->request_list)) {
1028		dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n",
1029			dep->name);
1030		dep->flags |= DWC3_EP_PENDING_REQUEST;
1031		return;
1032	}
1033
1034	/* 4 micro frames in the future */
1035	uf = cur_uf + dep->interval * 4;
1036
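	/*
	 * uf is handed to __dwc3_gadget_kick_transfer() as cmd_param and
	 * ends up in the Start Transfer command's parameter field, i.e.
	 * the (micro)frame on which the core should start the transfer.
	 */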
1037	__dwc3_gadget_kick_transfer(dep, uf, 1);
1038}
1039
1040static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1041		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1042{
1043	u32 cur_uf, mask;
1044
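	/*
	 * dep->interval is a power of two (see dwc3_gadget_set_ep_config),
	 * so this mask rounds the reported microframe number down to a
	 * multiple of the endpoint interval.
	 */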
1045	mask = ~(dep->interval - 1);
1046	cur_uf = event->parameters & mask;
1047
1048	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1049}
1050
1051static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1052{
1053	struct dwc3		*dwc = dep->dwc;
1054	int			ret;
1055
1056	req->request.actual	= 0;
1057	req->request.status	= -EINPROGRESS;
1058	req->direction		= dep->direction;
1059	req->epnum		= dep->number;
1060
1061	/*
1062	 * We only add to our list of requests now and
1063	 * start consuming the list once we get XferNotReady
1064	 * IRQ.
1065	 *
1066	 * That way, we avoid doing anything that we don't need
1067	 * to do now and defer it until the point we receive a
1068	 * particular token from the Host side.
1069	 *
1070	 * This will also avoid Host cancelling URBs due to too
1071	 * many NAKs.
1072	 */
1073	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1074			dep->direction);
1075	if (ret)
1076		return ret;
1077
1078	list_add_tail(&req->list, &dep->request_list);
1079
1080	/*
1081	 * There are a few special cases:
1082	 *
1083	 * 1. XferNotReady with empty list of requests. We need to kick the
1084	 *    transfer here in that situation, otherwise we will be NAKing
1085	 *    forever. If we get XferNotReady before gadget driver has a
1086	 *    chance to queue a request, we will ACK the IRQ but won't be
1087	 *    able to receive the data until the next request is queued.
1088	 *    The following code is handling exactly that.
1089	 *
1090	 */
1091	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1092		/*
1093		 * If xfernotready has already elapsed and this is an isoc
1094		 * transfer, issue END TRANSFER so that we receive
1095		 * xfernotready again and regain the notion of the current
1096		 * microframe.
1097		 */
1098		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1099			if (list_empty(&dep->req_queued)) {
1100				dwc3_stop_active_transfer(dwc, dep->number);
1101				dep->flags = DWC3_EP_ENABLED;
1102			}
1103			return 0;
1104		}
1105
1106		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1107		if (ret && ret != -EBUSY)
1108			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1109					dep->name);
1110		return ret;
1111	}
1112
1113	/*
1114	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1115	 *    kick the transfer here after queuing a request, otherwise the
1116	 *    core may not see the modified TRB(s).
1117	 */
1118	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1119			(dep->flags & DWC3_EP_BUSY) &&
1120			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
1121		WARN_ON_ONCE(!dep->resource_index);
1122		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1123				false);
1124		if (ret && ret != -EBUSY)
1125			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1126					dep->name);
1127		return ret;
1128	}
1129
1130	return 0;
1131}
1132
1133static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1134	gfp_t gfp_flags)
1135{
1136	struct dwc3_request		*req = to_dwc3_request(request);
1137	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1138	struct dwc3			*dwc = dep->dwc;
1139
1140	unsigned long			flags;
1141
1142	int				ret;
1143
1144	if (!dep->endpoint.desc) {
1145		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1146				request, ep->name);
1147		return -ESHUTDOWN;
1148	}
1149
1150	dev_vdbg(dwc->dev, "queuing request %p to %s length %d\n",
1151			request, ep->name, request->length);
1152
1153	spin_lock_irqsave(&dwc->lock, flags);
1154	ret = __dwc3_gadget_ep_queue(dep, req);
1155	spin_unlock_irqrestore(&dwc->lock, flags);
1156
1157	return ret;
1158}
1159
1160static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1161		struct usb_request *request)
1162{
1163	struct dwc3_request		*req = to_dwc3_request(request);
1164	struct dwc3_request		*r = NULL;
1165
1166	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1167	struct dwc3			*dwc = dep->dwc;
1168
1169	unsigned long			flags;
1170	int				ret = 0;
1171
1172	spin_lock_irqsave(&dwc->lock, flags);
1173
1174	list_for_each_entry(r, &dep->request_list, list) {
1175		if (r == req)
1176			break;
1177	}
1178
1179	if (r != req) {
1180		list_for_each_entry(r, &dep->req_queued, list) {
1181			if (r == req)
1182				break;
1183		}
1184		if (r == req) {
1185			/* wait until it is processed */
1186			dwc3_stop_active_transfer(dwc, dep->number);
1187			goto out1;
1188		}
1189		dev_err(dwc->dev, "request %p was not queued to %s\n",
1190				request, ep->name);
1191		ret = -EINVAL;
1192		goto out0;
1193	}
1194
1195out1:
1196	/* giveback the request */
1197	dwc3_gadget_giveback(dep, req, -ECONNRESET);
1198
1199out0:
1200	spin_unlock_irqrestore(&dwc->lock, flags);
1201
1202	return ret;
1203}
1204
1205int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1206{
1207	struct dwc3_gadget_ep_cmd_params	params;
1208	struct dwc3				*dwc = dep->dwc;
1209	int					ret;
1210
1211	memset(&params, 0x00, sizeof(params));
1212
1213	if (value) {
1214		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1215			DWC3_DEPCMD_SETSTALL, &params);
1216		if (ret)
1217			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1218					value ? "set" : "clear",
1219					dep->name);
1220		else
1221			dep->flags |= DWC3_EP_STALL;
1222	} else {
1223		if (dep->flags & DWC3_EP_WEDGE)
1224			return 0;
1225
1226		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1227			DWC3_DEPCMD_CLEARSTALL, &params);
1228		if (ret)
1229			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1230					value ? "set" : "clear",
1231					dep->name);
1232		else
1233			dep->flags &= ~DWC3_EP_STALL;
1234	}
1235
1236	return ret;
1237}
1238
1239static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1240{
1241	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1242	struct dwc3			*dwc = dep->dwc;
1243
1244	unsigned long			flags;
1245
1246	int				ret;
1247
1248	spin_lock_irqsave(&dwc->lock, flags);
1249
1250	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1251		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1252		ret = -EINVAL;
1253		goto out;
1254	}
1255
1256	ret = __dwc3_gadget_ep_set_halt(dep, value);
1257out:
1258	spin_unlock_irqrestore(&dwc->lock, flags);
1259
1260	return ret;
1261}
1262
1263static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1264{
1265	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1266	struct dwc3			*dwc = dep->dwc;
1267	unsigned long			flags;
1268
1269	spin_lock_irqsave(&dwc->lock, flags);
1270	dep->flags |= DWC3_EP_WEDGE;
1271	spin_unlock_irqrestore(&dwc->lock, flags);
1272
1273	if (dep->number == 0 || dep->number == 1)
1274		return dwc3_gadget_ep0_set_halt(ep, 1);
1275	else
1276		return dwc3_gadget_ep_set_halt(ep, 1);
1277}
1278
1279/* -------------------------------------------------------------------------- */
1280
1281static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1282	.bLength	= USB_DT_ENDPOINT_SIZE,
1283	.bDescriptorType = USB_DT_ENDPOINT,
1284	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
1285};
1286
1287static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1288	.enable		= dwc3_gadget_ep0_enable,
1289	.disable	= dwc3_gadget_ep0_disable,
1290	.alloc_request	= dwc3_gadget_ep_alloc_request,
1291	.free_request	= dwc3_gadget_ep_free_request,
1292	.queue		= dwc3_gadget_ep0_queue,
1293	.dequeue	= dwc3_gadget_ep_dequeue,
1294	.set_halt	= dwc3_gadget_ep0_set_halt,
1295	.set_wedge	= dwc3_gadget_ep_set_wedge,
1296};
1297
1298static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1299	.enable		= dwc3_gadget_ep_enable,
1300	.disable	= dwc3_gadget_ep_disable,
1301	.alloc_request	= dwc3_gadget_ep_alloc_request,
1302	.free_request	= dwc3_gadget_ep_free_request,
1303	.queue		= dwc3_gadget_ep_queue,
1304	.dequeue	= dwc3_gadget_ep_dequeue,
1305	.set_halt	= dwc3_gadget_ep_set_halt,
1306	.set_wedge	= dwc3_gadget_ep_set_wedge,
1307};
1308
1309/* -------------------------------------------------------------------------- */
1310
1311static int dwc3_gadget_get_frame(struct usb_gadget *g)
1312{
1313	struct dwc3		*dwc = gadget_to_dwc(g);
1314	u32			reg;
1315
1316	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1317	return DWC3_DSTS_SOFFN(reg);
1318}
1319
1320static int dwc3_gadget_wakeup(struct usb_gadget *g)
1321{
1322	struct dwc3		*dwc = gadget_to_dwc(g);
1323
1324	unsigned long		timeout;
1325	unsigned long		flags;
1326
1327	u32			reg;
1328
1329	int			ret = 0;
1330
1331	u8			link_state;
1332	u8			speed;
1333
1334	spin_lock_irqsave(&dwc->lock, flags);
1335
1336	/*
1337	 * According to the Databook, the Remote Wakeup request should
1338	 * be issued only when the device is in the Early Suspend state.
1339	 *
1340	 * We can check that via USB Link State bits in DSTS register.
1341	 */
1342	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1343
1344	speed = reg & DWC3_DSTS_CONNECTSPD;
1345	if (speed == DWC3_DSTS_SUPERSPEED) {
1346		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1347		ret = -EINVAL;
1348		goto out;
1349	}
1350
1351	link_state = DWC3_DSTS_USBLNKST(reg);
1352
1353	switch (link_state) {
1354	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1355	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1356		break;
1357	default:
1358		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1359				link_state);
1360		ret = -EINVAL;
1361		goto out;
1362	}
1363
1364	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1365	if (ret < 0) {
1366		dev_err(dwc->dev, "failed to put link in Recovery\n");
1367		goto out;
1368	}
1369
1370	/* Recent versions do this automatically */
1371	if (dwc->revision < DWC3_REVISION_194A) {
1372		/* write zeroes to Link Change Request */
1373		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1374		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1375		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1376	}
1377
1378	/* poll until Link State changes to ON */
1379	timeout = jiffies + msecs_to_jiffies(100);
1380
1381	while (!time_after(jiffies, timeout)) {
1382		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1383
1384		/* in HS, means ON */
1385		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1386			break;
1387	}
1388
1389	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1390		dev_err(dwc->dev, "failed to send remote wakeup\n");
1391		ret = -EINVAL;
1392	}
1393
1394out:
1395	spin_unlock_irqrestore(&dwc->lock, flags);
1396
1397	return ret;
1398}
1399
1400static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1401		int is_selfpowered)
1402{
1403	struct dwc3		*dwc = gadget_to_dwc(g);
1404	unsigned long		flags;
1405
1406	spin_lock_irqsave(&dwc->lock, flags);
1407	dwc->is_selfpowered = !!is_selfpowered;
1408	spin_unlock_irqrestore(&dwc->lock, flags);
1409
1410	return 0;
1411}
1412
1413static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1414{
1415	u32			reg;
1416	u32			timeout = 500;
1417
1418	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1419	if (is_on) {
1420		if (dwc->revision <= DWC3_REVISION_187A) {
1421			reg &= ~DWC3_DCTL_TRGTULST_MASK;
1422			reg |= DWC3_DCTL_TRGTULST_RX_DET;
1423		}
1424
1425		if (dwc->revision >= DWC3_REVISION_194A)
1426			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1427		reg |= DWC3_DCTL_RUN_STOP;
1428		dwc->pullups_connected = true;
1429	} else {
1430		reg &= ~DWC3_DCTL_RUN_STOP;
1431		dwc->pullups_connected = false;
1432	}
1433
1434	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1435
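	/*
	 * DSTS.DEVCTRLHLT is set while the device controller is halted
	 * and cleared once it is running, so poll (for up to ~500us)
	 * until the bit reflects the requested Run/Stop state.
	 */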
1436	do {
1437		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1438		if (is_on) {
1439			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1440				break;
1441		} else {
1442			if (reg & DWC3_DSTS_DEVCTRLHLT)
1443				break;
1444		}
1445		timeout--;
1446		if (!timeout)
1447			return -ETIMEDOUT;
1448		udelay(1);
1449	} while (1);
1450
1451	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1452			dwc->gadget_driver
1453			? dwc->gadget_driver->function : "no-function",
1454			is_on ? "connect" : "disconnect");
1455
1456	return 0;
1457}
1458
1459static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1460{
1461	struct dwc3		*dwc = gadget_to_dwc(g);
1462	unsigned long		flags;
1463	int			ret;
1464
1465	is_on = !!is_on;
1466
1467	spin_lock_irqsave(&dwc->lock, flags);
1468	ret = dwc3_gadget_run_stop(dwc, is_on);
1469	spin_unlock_irqrestore(&dwc->lock, flags);
1470
1471	return ret;
1472}
1473
1474static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1475{
1476	u32			reg;
1477
1478	/* Enable all but Start and End of Frame IRQs */
1479	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1480			DWC3_DEVTEN_EVNTOVERFLOWEN |
1481			DWC3_DEVTEN_CMDCMPLTEN |
1482			DWC3_DEVTEN_ERRTICERREN |
1483			DWC3_DEVTEN_WKUPEVTEN |
1484			DWC3_DEVTEN_ULSTCNGEN |
1485			DWC3_DEVTEN_CONNECTDONEEN |
1486			DWC3_DEVTEN_USBRSTEN |
1487			DWC3_DEVTEN_DISCONNEVTEN);
1488
1489	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1490}
1491
1492static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1493{
1494	/* mask all interrupts */
1495	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1496}
1497
1498static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1499static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1500
1501static int dwc3_gadget_start(struct usb_gadget *g,
1502		struct usb_gadget_driver *driver)
1503{
1504	struct dwc3		*dwc = gadget_to_dwc(g);
1505	struct dwc3_ep		*dep;
1506	unsigned long		flags;
1507	int			ret = 0;
1508	int			irq;
1509	u32			reg;
1510
1511	spin_lock_irqsave(&dwc->lock, flags);
1512
1513	if (dwc->gadget_driver) {
1514		dev_err(dwc->dev, "%s is already bound to %s\n",
1515				dwc->gadget.name,
1516				dwc->gadget_driver->driver.name);
1517		ret = -EBUSY;
1518		goto err0;
1519	}
1520
1521	dwc->gadget_driver	= driver;
1522
1523	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1524	reg &= ~(DWC3_DCFG_SPEED_MASK);
1525
1526	/**
1527	 * WORKAROUND: DWC3 revision < 2.20a have an issue
1528	 * which would cause metastability state on Run/Stop
1529	 * bit if we try to force the IP to USB2-only mode.
1530	 *
1531	 * Because of that, we cannot configure the IP to any
1532	 * speed other than SuperSpeed
1533	 *
1534	 * Refers to:
1535	 *
1536	 * STAR#9000525659: Clock Domain Crossing on DCTL in
1537	 * USB 2.0 Mode
1538	 */
1539	if (dwc->revision < DWC3_REVISION_220A)
1540		reg |= DWC3_DCFG_SUPERSPEED;
1541	else
1542		reg |= dwc->maximum_speed;
1543	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1544
1545	dwc->start_config_issued = false;
1546
1547	/* Start with SuperSpeed Default */
1548	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1549
1550	dep = dwc->eps[0];
1551	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
1552	if (ret) {
1553		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1554		goto err0;
1555	}
1556
1557	dep = dwc->eps[1];
1558	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
1559	if (ret) {
1560		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1561		goto err1;
1562	}
1563
1564	/* begin to receive SETUP packets */
1565	dwc->ep0state = EP0_SETUP_PHASE;
1566	dwc3_ep0_out_start(dwc);
1567
1568	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1569	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1570			IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
1571	if (ret) {
1572		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1573				irq, ret);
1574		goto err1;
1575	}
1576
1577	dwc3_gadget_enable_irq(dwc);
1578
1579	spin_unlock_irqrestore(&dwc->lock, flags);
1580
1581	return 0;
1582
1583err1:
1584	__dwc3_gadget_ep_disable(dwc->eps[0]);
1585
1586err0:
1587	spin_unlock_irqrestore(&dwc->lock, flags);
1588
1589	return ret;
1590}
1591
1592static int dwc3_gadget_stop(struct usb_gadget *g,
1593		struct usb_gadget_driver *driver)
1594{
1595	struct dwc3		*dwc = gadget_to_dwc(g);
1596	unsigned long		flags;
1597	int			irq;
1598
1599	spin_lock_irqsave(&dwc->lock, flags);
1600
1601	dwc3_gadget_disable_irq(dwc);
1602	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1603	free_irq(irq, dwc);
1604
1605	__dwc3_gadget_ep_disable(dwc->eps[0]);
1606	__dwc3_gadget_ep_disable(dwc->eps[1]);
1607
1608	dwc->gadget_driver	= NULL;
1609
1610	spin_unlock_irqrestore(&dwc->lock, flags);
1611
1612	return 0;
1613}
1614
1615static const struct usb_gadget_ops dwc3_gadget_ops = {
1616	.get_frame		= dwc3_gadget_get_frame,
1617	.wakeup			= dwc3_gadget_wakeup,
1618	.set_selfpowered	= dwc3_gadget_set_selfpowered,
1619	.pullup			= dwc3_gadget_pullup,
1620	.udc_start		= dwc3_gadget_start,
1621	.udc_stop		= dwc3_gadget_stop,
1622};
1623
1624/* -------------------------------------------------------------------------- */
1625
1626static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1627{
1628	struct dwc3_ep			*dep;
1629	u8				epnum;
1630
1631	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1632
1633	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1634		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1635		if (!dep) {
1636			dev_err(dwc->dev, "can't allocate endpoint %d\n",
1637					epnum);
1638			return -ENOMEM;
1639		}
1640
1641		dep->dwc = dwc;
1642		dep->number = epnum;
1643		dwc->eps[epnum] = dep;
1644
1645		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1646				(epnum & 1) ? "in" : "out");
1647		dep->endpoint.name = dep->name;
1648		dep->direction = (epnum & 1);
1649
1650		if (epnum == 0 || epnum == 1) {
1651			dep->endpoint.maxpacket = 512;
1652			dep->endpoint.maxburst = 1;
1653			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1654			if (!epnum)
1655				dwc->gadget.ep0 = &dep->endpoint;
1656		} else {
1657			int		ret;
1658
1659			dep->endpoint.maxpacket = 1024;
1660			dep->endpoint.max_streams = 15;
1661			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1662			list_add_tail(&dep->endpoint.ep_list,
1663					&dwc->gadget.ep_list);
1664
1665			ret = dwc3_alloc_trb_pool(dep);
1666			if (ret)
1667				return ret;
1668		}
1669
1670		INIT_LIST_HEAD(&dep->request_list);
1671		INIT_LIST_HEAD(&dep->req_queued);
1672	}
1673
1674	return 0;
1675}
1676
1677static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1678{
1679	struct dwc3_ep			*dep;
1680	u8				epnum;
1681
1682	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1683		dep = dwc->eps[epnum];
1684		dwc3_free_trb_pool(dep);
1685
1686		if (epnum != 0 && epnum != 1)
1687			list_del(&dep->endpoint.ep_list);
1688
1689		kfree(dep);
1690	}
1691}
1692
1693/* -------------------------------------------------------------------------- */
1694
1695static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1696		struct dwc3_request *req, struct dwc3_trb *trb,
1697		const struct dwc3_event_depevt *event, int status)
1698{
1699	unsigned int		count;
1700	unsigned int		s_pkt = 0;
1701	unsigned int		trb_status;
1702
1703	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1704		/*
1705		 * We continue despite the error. There is not much we
1706		 * can do. If we don't clean it up we loop forever. If
1707		 * we skip the TRB then it gets overwritten after a
1708		 * while since we use them in a ring buffer. A BUG()
1709		 * would help. Let's hope that if this occurs, someone
1710		 * fixes the root cause instead of looking away :)
1711		 */
1712		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1713				dep->name, trb);
1714	count = trb->size & DWC3_TRB_SIZE_MASK;
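	/*
	 * The controller decrements the TRB's buffer size field as data
	 * is transferred, so count now holds the bytes left untransferred
	 * for this TRB.
	 */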
1715
1716	if (dep->direction) {
1717		if (count) {
1718			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1719			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1720				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1721						dep->name);
1722				/*
1723				 * If missed isoc occurred and there is
1724				 * no request queued then issue END
1725				 * TRANSFER, so that core generates
1726				 * next xfernotready and we will issue
1727				 * a fresh START TRANSFER.
1728				 * If there are still queued requests
1729				 * then wait, do not issue either END
1730				 * or UPDATE TRANSFER, just attach next
1731				 * request in request_list during
1732				 * giveback. If any future queued request
1733				 * is successfully transferred then we
1734				 * will issue UPDATE TRANSFER for all
1735				 * requests in the request_list.
1736				 */
1737				dep->flags |= DWC3_EP_MISSED_ISOC;
1738			} else {
1739				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1740						dep->name);
1741				status = -ECONNRESET;
1742			}
1743		} else {
1744			dep->flags &= ~DWC3_EP_MISSED_ISOC;
1745		}
1746	} else {
1747		if (count && (event->status & DEPEVT_STATUS_SHORT))
1748			s_pkt = 1;
1749	}
1750
1751	/*
1752	 * We assume here we will always receive the entire data block
1753	 * which we should receive. Meaning, if we program RX to
1754	 * receive 4K but we receive only 2K, we assume that's all we
1755	 * should receive and we simply bounce the request back to the
1756	 * gadget driver for further processing.
1757	 */
1758	req->request.actual += req->request.length - count;
1759	if (s_pkt)
1760		return 1;
1761	if ((event->status & DEPEVT_STATUS_LST) &&
1762			(trb->ctrl & (DWC3_TRB_CTRL_LST |
1763				DWC3_TRB_CTRL_HWO)))
1764		return 1;
1765	if ((event->status & DEPEVT_STATUS_IOC) &&
1766			(trb->ctrl & DWC3_TRB_CTRL_IOC))
1767		return 1;
1768	return 0;
1769}
1770
1771static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1772		const struct dwc3_event_depevt *event, int status)
1773{
1774	struct dwc3_request	*req;
1775	struct dwc3_trb		*trb;
1776	unsigned int		slot;
1777	unsigned int		i;
1778	int			ret;
1779
1780	do {
1781		req = next_request(&dep->req_queued);
1782		if (!req) {
1783			WARN_ON_ONCE(1);
1784			return 1;
1785		}
1786		i = 0;
1787		do {
1788			slot = req->start_slot + i;
1789			if ((slot == DWC3_TRB_NUM - 1) &&
1790				usb_endpoint_xfer_isoc(dep->endpoint.desc))
1791				slot++;
1792			slot %= DWC3_TRB_NUM;
1793			trb = &dep->trb_pool[slot];
1794
1795			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1796					event, status);
1797			if (ret)
1798				break;
1799		} while (++i < req->request.num_mapped_sgs);
1800
1801		dwc3_gadget_giveback(dep, req, status);
1802
1803		if (ret)
1804			break;
1805	} while (1);
1806
1807	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1808			list_empty(&dep->req_queued)) {
1809		if (list_empty(&dep->request_list)) {
1810			/*
1811			 * If there is no entry in request list then do
1812			 * not issue END TRANSFER now. Just set PENDING
1813			 * flag, so that END TRANSFER is issued when an
1814			 * entry is added into request list.
1815			 */
1816			dep->flags = DWC3_EP_PENDING_REQUEST;
1817		} else {
1818			dwc3_stop_active_transfer(dwc, dep->number);
1819			dep->flags = DWC3_EP_ENABLED;
1820		}
1821		return 1;
1822	}
1823
1824	if ((event->status & DEPEVT_STATUS_IOC) &&
1825			(trb->ctrl & DWC3_TRB_CTRL_IOC))
1826		return 0;
1827	return 1;
1828}
1829
1830static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1831		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1832		int start_new)
1833{
1834	unsigned		status = 0;
1835	int			clean_busy;
1836
1837	if (event->status & DEPEVT_STATUS_BUSERR)
1838		status = -ECONNRESET;
1839
1840	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1841	if (clean_busy)
1842		dep->flags &= ~DWC3_EP_BUSY;
1843
1844	/*
1845	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1846	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1847	 */
1848	if (dwc->revision < DWC3_REVISION_183A) {
1849		u32		reg;
1850		int		i;
1851
1852		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1853			dep = dwc->eps[i];
1854
1855			if (!(dep->flags & DWC3_EP_ENABLED))
1856				continue;
1857
1858			if (!list_empty(&dep->req_queued))
1859				return;
1860		}
1861
1862		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1863		reg |= dwc->u1u2;
1864		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1865
1866		dwc->u1u2 = 0;
1867	}
1868}
1869
1870static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1871		const struct dwc3_event_depevt *event)
1872{
1873	struct dwc3_ep		*dep;
1874	u8			epnum = event->endpoint_number;
1875
1876	dep = dwc->eps[epnum];
1877
1878	if (!(dep->flags & DWC3_EP_ENABLED))
1879		return;
1880
1881	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1882			dwc3_ep_event_string(event->endpoint_event));
1883
1884	if (epnum == 0 || epnum == 1) {
1885		dwc3_ep0_interrupt(dwc, event);
1886		return;
1887	}
1888
1889	switch (event->endpoint_event) {
1890	case DWC3_DEPEVT_XFERCOMPLETE:
1891		dep->resource_index = 0;
1892
1893		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1894			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1895					dep->name);
1896			return;
1897		}
1898
1899		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1900		break;
1901	case DWC3_DEPEVT_XFERINPROGRESS:
1902		if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1903			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1904					dep->name);
1905			return;
1906		}
1907
1908		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1909		break;
1910	case DWC3_DEPEVT_XFERNOTREADY:
1911		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1912			dwc3_gadget_start_isoc(dwc, dep, event);
1913		} else {
1914			int ret;
1915
1916			dev_vdbg(dwc->dev, "%s: reason %s\n",
1917					dep->name, event->status &
1918					DEPEVT_STATUS_TRANSFER_ACTIVE
1919					? "Transfer Active"
1920					: "Transfer Not Active");
1921
1922			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1923			if (!ret || ret == -EBUSY)
1924				return;
1925
1926			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1927					dep->name);
1928		}
1929
1930		break;
1931	case DWC3_DEPEVT_STREAMEVT:
1932		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
1933			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1934					dep->name);
1935			return;
1936		}
1937
1938		switch (event->status) {
1939		case DEPEVT_STREAMEVT_FOUND:
1940			dev_vdbg(dwc->dev, "Stream %d found and started\n",
1941					event->parameters);
1942
1943			break;
1944		case DEPEVT_STREAMEVT_NOTFOUND:
1945			/* FALLTHROUGH */
1946		default:
1947			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1948		}
1949		break;
1950	case DWC3_DEPEVT_RXTXFIFOEVT:
1951		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1952		break;
1953	case DWC3_DEPEVT_EPCMDCMPLT:
1954		dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
1955		break;
1956	}
1957}
1958
1959static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1960{
1961	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1962		spin_unlock(&dwc->lock);
1963		dwc->gadget_driver->disconnect(&dwc->gadget);
1964		spin_lock(&dwc->lock);
1965	}
1966}
1967
1968static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1969{
1970	struct dwc3_ep *dep;
1971	struct dwc3_gadget_ep_cmd_params params;
1972	u32 cmd;
1973	int ret;
1974
1975	dep = dwc->eps[epnum];
1976
1977	if (!dep->resource_index)
1978		return;
1979
1980	/*
1981	 * NOTICE: We are violating what the Databook says about the
1982	 * EndTransfer command. Ideally we would _always_ wait for the
1983	 * EndTransfer Command Completion IRQ, but that's causing too
1984	 * much trouble synchronizing between us and gadget driver.
1985	 *
1986	 * We have discussed this with the IP Provider and it was
1987	 * suggested to giveback all requests here, but give HW some
1988	 * extra time to synchronize with the interconnect. We're using
1989	 * an arbitrary 100us delay for that.
1990	 *
1991	 * Note also that a similar handling was tested by Synopsys
1992	 * (thanks a lot Paul) and nothing bad has come out of it.
1993	 * In short, what we're doing is:
1994	 *
1995	 * - Issue EndTransfer WITH CMDIOC bit set
1996	 * - Wait 100us
1997	 */
1998
1999	cmd = DWC3_DEPCMD_ENDTRANSFER;
2000	cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
2001	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2002	memset(&params, 0, sizeof(params));
2003	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2004	WARN_ON_ONCE(ret);
2005	dep->resource_index = 0;
2006	dep->flags &= ~DWC3_EP_BUSY;
2007	udelay(100);
2008}
2009
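/*
 * Give back any requests still queued on the enabled non-control
 * endpoints; used from the USB Reset path below.
 */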
2010static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2011{
2012	u32 epnum;
2013
2014	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2015		struct dwc3_ep *dep;
2016
2017		dep = dwc->eps[epnum];
2018		if (!(dep->flags & DWC3_EP_ENABLED))
2019			continue;
2020
2021		dwc3_remove_requests(dwc, dep);
2022	}
2023}
2024
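/*
 * Issue a Clear Stall endpoint command for every endpoint that is
 * currently flagged as stalled.
 */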
2025static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2026{
2027	u32 epnum;
2028
2029	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2030		struct dwc3_ep *dep;
2031		struct dwc3_gadget_ep_cmd_params params;
2032		int ret;
2033
2034		dep = dwc->eps[epnum];
2035
2036		if (!(dep->flags & DWC3_EP_STALL))
2037			continue;
2038
2039		dep->flags &= ~DWC3_EP_STALL;
2040
2041		memset(&params, 0, sizeof(params));
2042		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2043				DWC3_DEPCMD_CLEARSTALL, &params);
2044		WARN_ON_ONCE(ret);
2045	}
2046}
2047
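/*
 * Handle the Disconnect Detected event: stop initiating U1/U2 link
 * power transitions, notify the gadget driver and reset the
 * connection-related bookkeeping.
 */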
2048static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2049{
2050	u32			reg;
2051
2052	dev_vdbg(dwc->dev, "%s\n", __func__);
2053
2054	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2055	reg &= ~DWC3_DCTL_INITU1ENA;
2056	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2057
2058	reg &= ~DWC3_DCTL_INITU2ENA;
2059	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2060
2061	dwc3_disconnect_gadget(dwc);
2062	dwc->start_config_issued = false;
2063
2064	dwc->gadget.speed = USB_SPEED_UNKNOWN;
2065	dwc->setup_packet_pending = false;
2066}
2067
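/*
 * The two helpers below toggle the Suspend-PHY bits in GUSB3PIPECTL(0)
 * and GUSB2PHYCFG(0). Cores older than 1.94a need the driver to
 * suspend/resume the PHYs by hand (see the reset and conndone paths
 * below); newer cores handle PHY suspend automatically.
 */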
2068static void dwc3_gadget_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
2069{
2070	u32			reg;
2071
2072	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
2073
2074	if (suspend)
2075		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
2076	else
2077		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
2078
2079	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
2080}
2081
2082static void dwc3_gadget_usb2_phy_suspend(struct dwc3 *dwc, int suspend)
2083{
2084	u32			reg;
2085
2086	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2087
2088	if (suspend)
2089		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
2090	else
2091		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
2092
2093	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2094}
2095
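/*
 * Handle the USB Reset event: optionally fake a disconnect (see the
 * workaround below), move the gadget back to the Default state, clear
 * any active test mode, flush active transfers and stalled endpoints,
 * and reset the device address so the host can re-enumerate us.
 */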
2096static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2097{
2098	u32			reg;
2099
2100	dev_vdbg(dwc->dev, "%s\n", __func__);
2101
2102	/*
2103	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2104	 * would cause a missing Disconnect Event if there's a
2105	 * pending Setup Packet in the FIFO.
2106	 *
2107	 * There's no suggested workaround in the official Bug
2108	 * report, which states that "unless the driver/application
2109	 * is doing any special handling of a disconnect event,
2110	 * there is no functional issue".
2111	 *
2112	 * Unfortunately, it turns out that we _do_ some special
2113	 * handling of a disconnect event, namely complete all
2114	 * pending transfers, notify gadget driver of the
2115	 * disconnection, and so on.
2116	 *
2117	 * Our suggested workaround is to follow the Disconnect
2118	 * Event steps here, instead, based on a setup_packet_pending
2119	 * flag. That flag gets set whenever we have an XferNotReady
2120	 * event on EP0 and gets cleared on XferComplete for the
2121	 * same endpoint.
2122	 *
2123	 * Refers to:
2124	 *
2125	 * STAR#9000466709: RTL: Device : Disconnect event not
2126	 * generated if setup packet pending in FIFO
2127	 */
2128	if (dwc->revision < DWC3_REVISION_188A) {
2129		if (dwc->setup_packet_pending)
2130			dwc3_gadget_disconnect_interrupt(dwc);
2131	}
2132
2133	/* after reset -> Default State */
2134	usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
2135
2136	/* Recent versions support automatic phy suspend and don't need this */
2137	if (dwc->revision < DWC3_REVISION_194A) {
2138		/* Resume PHYs */
2139		dwc3_gadget_usb2_phy_suspend(dwc, false);
2140		dwc3_gadget_usb3_phy_suspend(dwc, false);
2141	}
2142
2143	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2144		dwc3_disconnect_gadget(dwc);
2145
2146	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2147	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2148	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2149	dwc->test_mode = false;
2150
2151	dwc3_stop_active_transfers(dwc);
2152	dwc3_clear_stall_all_ep(dwc);
2153	dwc->start_config_issued = false;
2154
2155	/* Reset device address to zero */
2156	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2157	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2158	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2159}
2160
2161static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2162{
2163	u32 reg;
2164	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2165
2166	/*
2167	 * We change the clock only at SuperSpeed. The reason for doing so is
2168	 * not entirely clear; it may become part of the power saving plan.
2169	 */
2170
2171	if (speed != DWC3_DSTS_SUPERSPEED)
2172		return;
2173
2174	/*
2175	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2176	 * each time on Connect Done.
2177	 */
2178	if (!usb30_clock)
2179		return;
2180
2181	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2182	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2183	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2184}
2185
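/* Suspend whichever PHY is not needed at the current connection speed. */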
2186static void dwc3_gadget_phy_suspend(struct dwc3 *dwc, u8 speed)
2187{
2188	switch (speed) {
2189	case USB_SPEED_SUPER:
2190		dwc3_gadget_usb2_phy_suspend(dwc, true);
2191		break;
2192	case USB_SPEED_HIGH:
2193	case USB_SPEED_FULL:
2194	case USB_SPEED_LOW:
2195		dwc3_gadget_usb3_phy_suspend(dwc, true);
2196		break;
2197	}
2198}
2199
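/*
 * Handle the Connection Done event: read the negotiated speed from
 * DSTS, program ep0's maximum packet size accordingly and re-enable
 * both directions of ep0. The connect-speed field in DSTS uses the
 * same encoding as the DCFG speed field, which is why the DWC3_DCFG_*
 * values can be reused in the switch below.
 */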
2200static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2201{
2202	struct dwc3_ep		*dep;
2203	int			ret;
2204	u32			reg;
2205	u8			speed;
2206
2207	dev_vdbg(dwc->dev, "%s\n", __func__);
2208
2209	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2210	speed = reg & DWC3_DSTS_CONNECTSPD;
2211	dwc->speed = speed;
2212
2213	dwc3_update_ram_clk_sel(dwc, speed);
2214
2215	switch (speed) {
2216	case DWC3_DCFG_SUPERSPEED:
2217		/*
2218		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2219		 * would cause a missing USB3 Reset event.
2220		 *
2221		 * In such situations, we should force a USB3 Reset
2222		 * event by calling our dwc3_gadget_reset_interrupt()
2223		 * routine.
2224		 *
2225		 * Refers to:
2226		 *
2227		 * STAR#9000483510: RTL: SS : USB3 reset event may
2228		 * not be generated always when the link enters poll
2229		 */
2230		if (dwc->revision < DWC3_REVISION_190A)
2231			dwc3_gadget_reset_interrupt(dwc);
2232
2233		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2234		dwc->gadget.ep0->maxpacket = 512;
2235		dwc->gadget.speed = USB_SPEED_SUPER;
2236		break;
2237	case DWC3_DCFG_HIGHSPEED:
2238		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2239		dwc->gadget.ep0->maxpacket = 64;
2240		dwc->gadget.speed = USB_SPEED_HIGH;
2241		break;
2242	case DWC3_DCFG_FULLSPEED2:
2243	case DWC3_DCFG_FULLSPEED1:
2244		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2245		dwc->gadget.ep0->maxpacket = 64;
2246		dwc->gadget.speed = USB_SPEED_FULL;
2247		break;
2248	case DWC3_DCFG_LOWSPEED:
2249		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2250		dwc->gadget.ep0->maxpacket = 8;
2251		dwc->gadget.speed = USB_SPEED_LOW;
2252		break;
2253	}
2254
2255	/* Enable USB2 LPM Capability */
2256
2257	if ((dwc->revision > DWC3_REVISION_194A)
2258			&& (speed != DWC3_DCFG_SUPERSPEED)) {
2259		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2260		reg |= DWC3_DCFG_LPM_CAP;
2261		dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2262
2263		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2264		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2265
2266		/*
2267		 * TODO: This should be configurable. For now using
2268		 * the maximum allowed HIRD threshold value of 0b1100
2269		 */
2270		reg |= DWC3_DCTL_HIRD_THRES(12);
2271
2272		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2273	}
2274
2275	/* Recent versions support automatic phy suspend and don't need this */
2276	if (dwc->revision < DWC3_REVISION_194A) {
2277		/* Suspend unneeded PHY */
2278		dwc3_gadget_phy_suspend(dwc, dwc->gadget.speed);
2279	}
2280
2281	dep = dwc->eps[0];
2282	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
2283	if (ret) {
2284		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2285		return;
2286	}
2287
2288	dep = dwc->eps[1];
2289	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
2290	if (ret) {
2291		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2292		return;
2293	}
2294
2295	/*
2296	 * Configure PHY via GUSB3PIPECTLn if required.
2297	 *
2298	 * Update GTXFIFOSIZn
2299	 *
2300	 * In both cases reset values should be sufficient.
2301	 */
2302}
2303
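/*
 * Handle the Wakeup event. Note that, unlike dwc3_disconnect_gadget(),
 * this calls ->resume() with the lock held and without checking that a
 * gadget driver providing the callback is bound; the assumption seems
 * to be that wakeup events only arrive while a driver is loaded.
 */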
2304static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2305{
2306	dev_vdbg(dwc->dev, "%s\n", __func__);
2307
2308	/*
2309	 * TODO take core out of low power mode when that's
2310	 * implemented.
2311	 */
2312
2313	dwc->gadget_driver->resume(&dwc->gadget);
2314}
2315
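/*
 * Track link state changes. This is also the first half of the U1/U2
 * throughput workaround described in the comment below.
 */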
2316static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2317		unsigned int evtinfo)
2318{
2319	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
2320
2321	/*
2322	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2323	 * on the link partner, the USB session might do multiple entries and
2324	 * exits of low power states before a transfer takes place.
2325	 *
2326	 * Due to this problem, we might experience lower throughput. The
2327	 * suggested workaround is to disable DCTL[12:9] bits if we're
2328	 * transitioning from U1/U2 to U0 and enable those bits again
2329	 * after a transfer completes and there are no pending transfers
2330	 * on any of the enabled endpoints.
2331	 *
2332	 * This is the first half of that workaround.
2333	 *
2334	 * Refers to:
2335	 *
2336	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2337	 * core send LGO_Ux entering U0
2338	 */
2339	if (dwc->revision < DWC3_REVISION_183A) {
2340		if (next == DWC3_LINK_STATE_U0) {
2341			u32	u1u2;
2342			u32	reg;
2343
2344			switch (dwc->link_state) {
2345			case DWC3_LINK_STATE_U1:
2346			case DWC3_LINK_STATE_U2:
2347				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2348				u1u2 = reg & (DWC3_DCTL_INITU2ENA
2349						| DWC3_DCTL_ACCEPTU2ENA
2350						| DWC3_DCTL_INITU1ENA
2351						| DWC3_DCTL_ACCEPTU1ENA);
2352
2353				if (!dwc->u1u2)
2354					dwc->u1u2 = reg & u1u2;
2355
2356				reg &= ~u1u2;
2357
2358				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2359				break;
2360			default:
2361				/* do nothing */
2362				break;
2363			}
2364		}
2365	}
2366
2367	dwc->link_state = next;
2368
2369	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
2370}
2371
2372static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2373		const struct dwc3_event_devt *event)
2374{
2375	switch (event->type) {
2376	case DWC3_DEVICE_EVENT_DISCONNECT:
2377		dwc3_gadget_disconnect_interrupt(dwc);
2378		break;
2379	case DWC3_DEVICE_EVENT_RESET:
2380		dwc3_gadget_reset_interrupt(dwc);
2381		break;
2382	case DWC3_DEVICE_EVENT_CONNECT_DONE:
2383		dwc3_gadget_conndone_interrupt(dwc);
2384		break;
2385	case DWC3_DEVICE_EVENT_WAKEUP:
2386		dwc3_gadget_wakeup_interrupt(dwc);
2387		break;
2388	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2389		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2390		break;
2391	case DWC3_DEVICE_EVENT_EOPF:
2392		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2393		break;
2394	case DWC3_DEVICE_EVENT_SOF:
2395		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2396		break;
2397	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2398		dev_vdbg(dwc->dev, "Erratic Error\n");
2399		break;
2400	case DWC3_DEVICE_EVENT_CMD_CMPL:
2401		dev_vdbg(dwc->dev, "Command Complete\n");
2402		break;
2403	case DWC3_DEVICE_EVENT_OVERFLOW:
2404		dev_vdbg(dwc->dev, "Overflow\n");
2405		break;
2406	default:
2407		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2408	}
2409}
2410
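/*
 * Demultiplex one raw event word: endpoint events are passed to
 * dwc3_endpoint_interrupt(), device events to dwc3_gadget_interrupt().
 */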
2411static void dwc3_process_event_entry(struct dwc3 *dwc,
2412		const union dwc3_event *event)
2413{
2414	/* Endpoint IRQ, handle it and return early */
2415	if (event->type.is_devspec == 0) {
2416		/* depevt */
2417		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2418	}
2419
2420	switch (event->type.type) {
2421	case DWC3_EVENT_TYPE_DEV:
2422		dwc3_gadget_interrupt(dwc, &event->devt);
2423		break;
2424	/* REVISIT what to do with Carkit and I2C events? */
2425	default:
2426		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2427	}
2428}
2429
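/*
 * Threaded (bottom half) interrupt handler: drain every event buffer
 * the top half marked as pending, processing one 4-byte event at a
 * time and acknowledging it by writing 4 back to GEVNTCOUNT so the
 * controller can reuse that space in the buffer.
 */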
2430static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2431{
2432	struct dwc3 *dwc = _dwc;
2433	unsigned long flags;
2434	irqreturn_t ret = IRQ_NONE;
2435	int i;
2436
2437	spin_lock_irqsave(&dwc->lock, flags);
2438
2439	for (i = 0; i < dwc->num_event_buffers; i++) {
2440		struct dwc3_event_buffer *evt;
2441		int			left;
2442
2443		evt = dwc->ev_buffs[i];
2444		left = evt->count;
2445
2446		if (!(evt->flags & DWC3_EVENT_PENDING))
2447			continue;
2448
2449		while (left > 0) {
2450			union dwc3_event event;
2451
2452			event.raw = *(u32 *) (evt->buf + evt->lpos);
2453
2454			dwc3_process_event_entry(dwc, &event);
2455
2456			/*
2457			 * FIXME: we wrap around to the next entry correctly
2458			 * because almost all entries are 4 bytes in size.
2459			 * There is one entry type of 12 bytes: a regular
2460			 * entry followed by 8 bytes of data. It is not yet
2461			 * clear how such an entry is laid out when it crosses
2462			 * the buffer boundary, so worry about that once we
2463			 * try to handle it.
2464			 */
2465			evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2466			left -= 4;
2467
2468			dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(i), 4);
2469		}
2470
2471		evt->count = 0;
2472		evt->flags &= ~DWC3_EVENT_PENDING;
2473		ret = IRQ_HANDLED;
2474	}
2475
2476	spin_unlock_irqrestore(&dwc->lock, flags);
2477
2478	return ret;
2479}
2480
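/*
 * Check one event buffer from hard IRQ context: latch the number of
 * pending event bytes from GEVNTCOUNT and flag the buffer so the
 * handler thread knows to process it.
 */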
2481static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2482{
2483	struct dwc3_event_buffer *evt;
2484	u32 count;
2485
2486	evt = dwc->ev_buffs[buf];
2487
2488	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2489	count &= DWC3_GEVNTCOUNT_MASK;
2490	if (!count)
2491		return IRQ_NONE;
2492
2493	evt->count = count;
2494	evt->flags |= DWC3_EVENT_PENDING;
2495
2496	return IRQ_WAKE_THREAD;
2497}
2498
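/*
 * Hard IRQ (top half) handler: check every event buffer and ask for
 * the handler thread to be woken if any of them has pending events.
 * Presumably registered together with dwc3_thread_interrupt() via
 * request_threaded_irq() elsewhere in this driver.
 */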
2499static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2500{
2501	struct dwc3			*dwc = _dwc;
2502	int				i;
2503	irqreturn_t			ret = IRQ_NONE;
2504
2505	spin_lock(&dwc->lock);
2506
2507	for (i = 0; i < dwc->num_event_buffers; i++) {
2508		irqreturn_t status;
2509
2510		status = dwc3_process_event_buf(dwc, i);
2511		if (status == IRQ_WAKE_THREAD)
2512			ret = status;
2513	}
2514
2515	spin_unlock(&dwc->lock);
2516
2517	return ret;
2518}
2519
2520/**
2521 * dwc3_gadget_init - Initializes gadget related registers
2522 * @dwc: pointer to our controller context structure
2523 *
2524 * Returns 0 on success otherwise negative errno.
2525 */
2526int dwc3_gadget_init(struct dwc3 *dwc)
2527{
2528	u32					reg;
2529	int					ret;
2530
2531	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2532			&dwc->ctrl_req_addr, GFP_KERNEL);
2533	if (!dwc->ctrl_req) {
2534		dev_err(dwc->dev, "failed to allocate ctrl request\n");
2535		ret = -ENOMEM;
2536		goto err0;
2537	}
2538
2539	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2540			&dwc->ep0_trb_addr, GFP_KERNEL);
2541	if (!dwc->ep0_trb) {
2542		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2543		ret = -ENOMEM;
2544		goto err1;
2545	}
2546
2547	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2548	if (!dwc->setup_buf) {
2549		dev_err(dwc->dev, "failed to allocate setup buffer\n");
2550		ret = -ENOMEM;
2551		goto err2;
2552	}
2553
2554	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2555			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2556			GFP_KERNEL);
2557	if (!dwc->ep0_bounce) {
2558		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2559		ret = -ENOMEM;
2560		goto err3;
2561	}
2562
2563	dwc->gadget.ops			= &dwc3_gadget_ops;
2564	dwc->gadget.max_speed		= USB_SPEED_SUPER;
2565	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2566	dwc->gadget.sg_supported	= true;
2567	dwc->gadget.name		= "dwc3-gadget";
2568
2569	/*
2570	 * REVISIT: Here we should clear all pending IRQs to be
2571	 * sure we're starting from a well known location.
2572	 */
2573
2574	ret = dwc3_gadget_init_endpoints(dwc);
2575	if (ret)
2576		goto err4;
2577
2578	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2579	reg |= DWC3_DCFG_LPM_CAP;
2580	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2581
2582	/* Enable USB2 LPM and automatic phy suspend only on recent versions */
2583	if (dwc->revision >= DWC3_REVISION_194A) {
2584		dwc3_gadget_usb2_phy_suspend(dwc, false);
2585		dwc3_gadget_usb3_phy_suspend(dwc, false);
2586	}
2587
2588	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2589	if (ret) {
2590		dev_err(dwc->dev, "failed to register udc\n");
2591		goto err5;
2592	}
2593
2594	return 0;
2595
2596err5:
2597	dwc3_gadget_free_endpoints(dwc);
2598
2599err4:
2600	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2601			dwc->ep0_bounce, dwc->ep0_bounce_addr);
2602
2603err3:
2604	kfree(dwc->setup_buf);
2605
2606err2:
2607	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2608			dwc->ep0_trb, dwc->ep0_trb_addr);
2609
2610err1:
2611	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2612			dwc->ctrl_req, dwc->ctrl_req_addr);
2613
2614err0:
2615	return ret;
2616}
2617
2618/* -------------------------------------------------------------------------- */
2619
2620void dwc3_gadget_exit(struct dwc3 *dwc)
2621{
2622	usb_del_gadget_udc(&dwc->gadget);
2623
2624	dwc3_gadget_free_endpoints(dwc);
2625
2626	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2627			dwc->ep0_bounce, dwc->ep0_bounce_addr);
2628
2629	kfree(dwc->setup_buf);
2630
2631	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2632			dwc->ep0_trb, dwc->ep0_trb_addr);
2633
2634	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2635			dwc->ctrl_req, dwc->ctrl_req_addr);
2636}
2637
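/*
 * Power management hooks used by the dwc3 core: ->prepare() masks
 * device interrupts while the pullup is connected, ->complete()
 * unmasks them and restarts the controller, ->suspend() tears down
 * ep0 and saves DCFG, and ->resume() re-enables ep0, restarts SETUP
 * handling and restores DCFG.
 */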
2638int dwc3_gadget_prepare(struct dwc3 *dwc)
2639{
2640	if (dwc->pullups_connected)
2641		dwc3_gadget_disable_irq(dwc);
2642
2643	return 0;
2644}
2645
2646void dwc3_gadget_complete(struct dwc3 *dwc)
2647{
2648	if (dwc->pullups_connected) {
2649		dwc3_gadget_enable_irq(dwc);
2650		dwc3_gadget_run_stop(dwc, true);
2651	}
2652}
2653
2654int dwc3_gadget_suspend(struct dwc3 *dwc)
2655{
2656	__dwc3_gadget_ep_disable(dwc->eps[0]);
2657	__dwc3_gadget_ep_disable(dwc->eps[1]);
2658
2659	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2660
2661	return 0;
2662}
2663
2664int dwc3_gadget_resume(struct dwc3 *dwc)
2665{
2666	struct dwc3_ep		*dep;
2667	int			ret;
2668
2669	/* Start with SuperSpeed Default */
2670	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2671
2672	dep = dwc->eps[0];
2673	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2674	if (ret)
2675		goto err0;
2676
2677	dep = dwc->eps[1];
2678	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2679	if (ret)
2680		goto err1;
2681
2682	/* begin to receive SETUP packets */
2683	dwc->ep0state = EP0_SETUP_PHASE;
2684	dwc3_ep0_out_start(dwc);
2685
2686	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2687
2688	return 0;
2689
2690err1:
2691	__dwc3_gadget_ep_disable(dwc->eps[0]);
2692
2693err0:
2694	return ret;
2695}
2696