gadget.c revision b09bb64239c83113b8b35fa6a1ecae43d8297eaa
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 *    to endorse or promote products derived from this software without
20 *    specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <linux/kernel.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/interrupt.h>
46#include <linux/io.h>
47#include <linux/list.h>
48#include <linux/dma-mapping.h>
49
50#include <linux/usb/ch9.h>
51#include <linux/usb/gadget.h>
52
53#include "core.h"
54#include "gadget.h"
55#include "io.h"
56
57/**
58 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
59 * @dwc: pointer to our context structure
60 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
61 *
62 * Caller should take care of locking. This function will
63 * return 0 on success or -EINVAL if wrong Test Selector
64 * is passed
65 */
66int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
67{
68	u32		reg;
69
70	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
71	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
72
73	switch (mode) {
74	case TEST_J:
75	case TEST_K:
76	case TEST_SE0_NAK:
77	case TEST_PACKET:
78	case TEST_FORCE_EN:
79		reg |= mode << 1;
80		break;
81	default:
82		return -EINVAL;
83	}
84
85	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
86
87	return 0;
88}
89
90/**
91 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
92 * @dwc: pointer to our context structure
93 * @state: the state to put link into
94 *
95 * Caller should take care of locking. This function will
96 * return 0 on success or -ETIMEDOUT.
97 */
98int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
99{
100	int		retries = 10000;
101	u32		reg;
102
103	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
104	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
105
106	/* set requested state */
107	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
108	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
109
110	/* wait for a change in DSTS */
111	while (--retries) {
112		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
113
114		if (DWC3_DSTS_USBLNKST(reg) == state)
115			return 0;
116
117		udelay(5);
118	}
119
120	dev_vdbg(dwc->dev, "link state change request timed out\n");
121
122	return -ETIMEDOUT;
123}
124
125/**
126 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
127 * @dwc: pointer to our context structure
128 *
129 * This function will a best effort FIFO allocation in order
130 * to improve FIFO usage and throughput, while still allowing
131 * us to enable as many endpoints as possible.
132 *
133 * Keep in mind that this operation will be highly dependent
134 * on the configured size for RAM1 - which contains TxFifo -,
135 * the amount of endpoints enabled on coreConsultant tool, and
136 * the width of the Master Bus.
137 *
138 * In the ideal world, we would always be able to satisfy the
139 * following equation:
140 *
141 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
142 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
143 *
144 * Unfortunately, due to many variables that's not always the case.
145 */
146int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
147{
148	int		last_fifo_depth = 0;
149	int		ram1_depth;
150	int		fifo_size;
151	int		mdwidth;
152	int		num;
153
154	if (!dwc->needs_fifo_resize)
155		return 0;
156
157	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
158	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
159
160	/* MDWIDTH is represented in bits, we need it in bytes */
161	mdwidth >>= 3;
162
163	/*
164	 * FIXME For now we will only allocate 1 wMaxPacketSize space
165	 * for each enabled endpoint, later patches will come to
166	 * improve this algorithm so that we better use the internal
167	 * FIFO space
168	 */
169	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
170		struct dwc3_ep	*dep = dwc->eps[num];
171		int		fifo_number = dep->number >> 1;
172		int		mult = 1;
173		int		tmp;
174
175		if (!(dep->number & 1))
176			continue;
177
178		if (!(dep->flags & DWC3_EP_ENABLED))
179			continue;
180
181		if (usb_endpoint_xfer_bulk(dep->desc)
182				|| usb_endpoint_xfer_isoc(dep->desc))
183			mult = 3;
184
185		/*
186		 * REVISIT: the following assumes we will always have enough
187		 * space available on the FIFO RAM for all possible use cases.
188		 * Make sure that's true somehow and change FIFO allocation
189		 * accordingly.
190		 *
191		 * If we have Bulk or Isochronous endpoints, we want
192		 * them to be able to be very, very fast. So we're giving
193		 * those endpoints a fifo_size which is enough for 3 full
194		 * packets
195		 */
196		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
197		tmp += mdwidth;
198
199		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
200
201		fifo_size |= (last_fifo_depth << 16);
202
203		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
204				dep->name, last_fifo_depth, fifo_size & 0xffff);
205
206		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
207				fifo_size);
208
209		last_fifo_depth += (fifo_size & 0xffff);
210	}
211
212	return 0;
213}
214
215void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
216		int status)
217{
218	struct dwc3			*dwc = dep->dwc;
219
220	if (req->queued) {
221		if (req->request.num_mapped_sgs)
222			dep->busy_slot += req->request.num_mapped_sgs;
223		else
224			dep->busy_slot++;
225
226		/*
227		 * Skip LINK TRB. We can't use req->trb and check for
228		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
229		 * completed (not the LINK TRB).
230		 */
231		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
232				usb_endpoint_xfer_isoc(dep->desc))
233			dep->busy_slot++;
234	}
235	list_del(&req->list);
236	req->trb = NULL;
237
238	if (req->request.status == -EINPROGRESS)
239		req->request.status = status;
240
241	usb_gadget_unmap_request(&dwc->gadget, &req->request,
242			req->direction);
243
244	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
245			req, dep->name, req->request.actual,
246			req->request.length, status);
247
248	spin_unlock(&dwc->lock);
249	req->request.complete(&dep->endpoint, &req->request);
250	spin_lock(&dwc->lock);
251}
252
253static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
254{
255	switch (cmd) {
256	case DWC3_DEPCMD_DEPSTARTCFG:
257		return "Start New Configuration";
258	case DWC3_DEPCMD_ENDTRANSFER:
259		return "End Transfer";
260	case DWC3_DEPCMD_UPDATETRANSFER:
261		return "Update Transfer";
262	case DWC3_DEPCMD_STARTTRANSFER:
263		return "Start Transfer";
264	case DWC3_DEPCMD_CLEARSTALL:
265		return "Clear Stall";
266	case DWC3_DEPCMD_SETSTALL:
267		return "Set Stall";
268	case DWC3_DEPCMD_GETSEQNUMBER:
269		return "Get Data Sequence Number";
270	case DWC3_DEPCMD_SETTRANSFRESOURCE:
271		return "Set Endpoint Transfer Resource";
272	case DWC3_DEPCMD_SETEPCONFIG:
273		return "Set Endpoint Configuration";
274	default:
275		return "UNKNOWN command";
276	}
277}
278
279int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
280{
281	u32		timeout = 500;
282	u32		reg;
283
284	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
285	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
286
287	do {
288		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
289		if (!(reg & DWC3_DGCMD_CMDACT)) {
290			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
291					DWC3_DGCMD_STATUS(reg));
292			return 0;
293		}
294
295		/*
296		 * We can't sleep here, because it's also called from
297		 * interrupt context.
298		 */
299		timeout--;
300		if (!timeout)
301			return -ETIMEDOUT;
302		udelay(1);
303	} while (1);
304}
305
306int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
307		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
308{
309	struct dwc3_ep		*dep = dwc->eps[ep];
310	u32			timeout = 500;
311	u32			reg;
312
313	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
314			dep->name,
315			dwc3_gadget_ep_cmd_string(cmd), params->param0,
316			params->param1, params->param2);
317
318	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
319	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
320	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
321
322	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
323	do {
324		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
325		if (!(reg & DWC3_DEPCMD_CMDACT)) {
326			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
327					DWC3_DEPCMD_STATUS(reg));
328			return 0;
329		}
330
331		/*
332		 * We can't sleep here, because it is also called from
333		 * interrupt context.
334		 */
335		timeout--;
336		if (!timeout)
337			return -ETIMEDOUT;
338
339		udelay(1);
340	} while (1);
341}
342
343static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
344		struct dwc3_trb *trb)
345{
346	u32		offset = (char *) trb - (char *) dep->trb_pool;
347
348	return dep->trb_pool_dma + offset;
349}
350
351static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
352{
353	struct dwc3		*dwc = dep->dwc;
354
355	if (dep->trb_pool)
356		return 0;
357
358	if (dep->number == 0 || dep->number == 1)
359		return 0;
360
361	dep->trb_pool = dma_alloc_coherent(dwc->dev,
362			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
363			&dep->trb_pool_dma, GFP_KERNEL);
364	if (!dep->trb_pool) {
365		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
366				dep->name);
367		return -ENOMEM;
368	}
369
370	return 0;
371}
372
373static void dwc3_free_trb_pool(struct dwc3_ep *dep)
374{
375	struct dwc3		*dwc = dep->dwc;
376
377	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
378			dep->trb_pool, dep->trb_pool_dma);
379
380	dep->trb_pool = NULL;
381	dep->trb_pool_dma = 0;
382}
383
384static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
385{
386	struct dwc3_gadget_ep_cmd_params params;
387	u32			cmd;
388
389	memset(&params, 0x00, sizeof(params));
390
391	if (dep->number != 1) {
392		cmd = DWC3_DEPCMD_DEPSTARTCFG;
393		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
394		if (dep->number > 1) {
395			if (dwc->start_config_issued)
396				return 0;
397			dwc->start_config_issued = true;
398			cmd |= DWC3_DEPCMD_PARAM(2);
399		}
400
401		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
402	}
403
404	return 0;
405}
406
407static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
408		const struct usb_endpoint_descriptor *desc,
409		const struct usb_ss_ep_comp_descriptor *comp_desc)
410{
411	struct dwc3_gadget_ep_cmd_params params;
412
413	memset(&params, 0x00, sizeof(params));
414
415	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
416		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
417		| DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);
418
419	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
420		| DWC3_DEPCFG_XFER_NOT_READY_EN;
421
422	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
423		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
424			| DWC3_DEPCFG_STREAM_EVENT_EN;
425		dep->stream_capable = true;
426	}
427
428	if (usb_endpoint_xfer_isoc(desc))
429		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
430
431	/*
432	 * We are doing 1:1 mapping for endpoints, meaning
433	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
434	 * so on. We consider the direction bit as part of the physical
435	 * endpoint number. So USB endpoint 0x81 is 0x03.
436	 */
437	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
438
439	/*
440	 * We must use the lower 16 TX FIFOs even though
441	 * HW might have more
442	 */
443	if (dep->direction)
444		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
445
446	if (desc->bInterval) {
447		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
448		dep->interval = 1 << (desc->bInterval - 1);
449	}
450
451	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
452			DWC3_DEPCMD_SETEPCONFIG, &params);
453}
454
455static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
456{
457	struct dwc3_gadget_ep_cmd_params params;
458
459	memset(&params, 0x00, sizeof(params));
460
461	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
462
463	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
464			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
465}
466
467/**
468 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
469 * @dep: endpoint to be initialized
470 * @desc: USB Endpoint Descriptor
471 *
472 * Caller should take care of locking
473 */
474static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
475		const struct usb_endpoint_descriptor *desc,
476		const struct usb_ss_ep_comp_descriptor *comp_desc)
477{
478	struct dwc3		*dwc = dep->dwc;
479	u32			reg;
480	int			ret = -ENOMEM;
481
482	if (!(dep->flags & DWC3_EP_ENABLED)) {
483		ret = dwc3_gadget_start_config(dwc, dep);
484		if (ret)
485			return ret;
486	}
487
488	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
489	if (ret)
490		return ret;
491
492	if (!(dep->flags & DWC3_EP_ENABLED)) {
493		struct dwc3_trb	*trb_st_hw;
494		struct dwc3_trb	*trb_link;
495
496		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
497		if (ret)
498			return ret;
499
500		dep->desc = desc;
501		dep->comp_desc = comp_desc;
502		dep->type = usb_endpoint_type(desc);
503		dep->flags |= DWC3_EP_ENABLED;
504
505		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
506		reg |= DWC3_DALEPENA_EP(dep->number);
507		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
508
509		if (!usb_endpoint_xfer_isoc(desc))
510			return 0;
511
512		memset(&trb_link, 0, sizeof(trb_link));
513
514		/* Link TRB for ISOC. The HWO bit is never reset */
515		trb_st_hw = &dep->trb_pool[0];
516
517		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
518
519		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
520		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
521		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
522		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
523	}
524
525	return 0;
526}
527
528static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
529static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
530{
531	struct dwc3_request		*req;
532
533	if (!list_empty(&dep->req_queued))
534		dwc3_stop_active_transfer(dwc, dep->number);
535
536	while (!list_empty(&dep->request_list)) {
537		req = next_request(&dep->request_list);
538
539		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
540	}
541}
542
543/**
544 * __dwc3_gadget_ep_disable - Disables a HW endpoint
545 * @dep: the endpoint to disable
546 *
547 * This function also removes requests which are currently processed ny the
548 * hardware and those which are not yet scheduled.
549 * Caller should take care of locking.
550 */
551static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
552{
553	struct dwc3		*dwc = dep->dwc;
554	u32			reg;
555
556	dwc3_remove_requests(dwc, dep);
557
558	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
559	reg &= ~DWC3_DALEPENA_EP(dep->number);
560	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
561
562	dep->stream_capable = false;
563	dep->desc = NULL;
564	dep->endpoint.desc = NULL;
565	dep->comp_desc = NULL;
566	dep->type = 0;
567	dep->flags = 0;
568
569	return 0;
570}
571
572/* -------------------------------------------------------------------------- */
573
574static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
575		const struct usb_endpoint_descriptor *desc)
576{
577	return -EINVAL;
578}
579
580static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
581{
582	return -EINVAL;
583}
584
585/* -------------------------------------------------------------------------- */
586
587static int dwc3_gadget_ep_enable(struct usb_ep *ep,
588		const struct usb_endpoint_descriptor *desc)
589{
590	struct dwc3_ep			*dep;
591	struct dwc3			*dwc;
592	unsigned long			flags;
593	int				ret;
594
595	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
596		pr_debug("dwc3: invalid parameters\n");
597		return -EINVAL;
598	}
599
600	if (!desc->wMaxPacketSize) {
601		pr_debug("dwc3: missing wMaxPacketSize\n");
602		return -EINVAL;
603	}
604
605	dep = to_dwc3_ep(ep);
606	dwc = dep->dwc;
607
608	switch (usb_endpoint_type(desc)) {
609	case USB_ENDPOINT_XFER_CONTROL:
610		strlcat(dep->name, "-control", sizeof(dep->name));
611		break;
612	case USB_ENDPOINT_XFER_ISOC:
613		strlcat(dep->name, "-isoc", sizeof(dep->name));
614		break;
615	case USB_ENDPOINT_XFER_BULK:
616		strlcat(dep->name, "-bulk", sizeof(dep->name));
617		break;
618	case USB_ENDPOINT_XFER_INT:
619		strlcat(dep->name, "-int", sizeof(dep->name));
620		break;
621	default:
622		dev_err(dwc->dev, "invalid endpoint transfer type\n");
623	}
624
625	if (dep->flags & DWC3_EP_ENABLED) {
626		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
627				dep->name);
628		return 0;
629	}
630
631	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
632
633	spin_lock_irqsave(&dwc->lock, flags);
634	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
635	spin_unlock_irqrestore(&dwc->lock, flags);
636
637	return ret;
638}
639
640static int dwc3_gadget_ep_disable(struct usb_ep *ep)
641{
642	struct dwc3_ep			*dep;
643	struct dwc3			*dwc;
644	unsigned long			flags;
645	int				ret;
646
647	if (!ep) {
648		pr_debug("dwc3: invalid parameters\n");
649		return -EINVAL;
650	}
651
652	dep = to_dwc3_ep(ep);
653	dwc = dep->dwc;
654
655	if (!(dep->flags & DWC3_EP_ENABLED)) {
656		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
657				dep->name);
658		return 0;
659	}
660
661	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
662			dep->number >> 1,
663			(dep->number & 1) ? "in" : "out");
664
665	spin_lock_irqsave(&dwc->lock, flags);
666	ret = __dwc3_gadget_ep_disable(dep);
667	spin_unlock_irqrestore(&dwc->lock, flags);
668
669	return ret;
670}
671
672static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
673	gfp_t gfp_flags)
674{
675	struct dwc3_request		*req;
676	struct dwc3_ep			*dep = to_dwc3_ep(ep);
677	struct dwc3			*dwc = dep->dwc;
678
679	req = kzalloc(sizeof(*req), gfp_flags);
680	if (!req) {
681		dev_err(dwc->dev, "not enough memory\n");
682		return NULL;
683	}
684
685	req->epnum	= dep->number;
686	req->dep	= dep;
687
688	return &req->request;
689}
690
691static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
692		struct usb_request *request)
693{
694	struct dwc3_request		*req = to_dwc3_request(request);
695
696	kfree(req);
697}
698
699/**
700 * dwc3_prepare_one_trb - setup one TRB from one request
701 * @dep: endpoint for which this request is prepared
702 * @req: dwc3_request pointer
703 */
704static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
705		struct dwc3_request *req, dma_addr_t dma,
706		unsigned length, unsigned last, unsigned chain)
707{
708	struct dwc3		*dwc = dep->dwc;
709	struct dwc3_trb		*trb;
710
711	unsigned int		cur_slot;
712
713	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
714			dep->name, req, (unsigned long long) dma,
715			length, last ? " last" : "",
716			chain ? " chain" : "");
717
718	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
719	cur_slot = dep->free_slot;
720	dep->free_slot++;
721
722	/* Skip the LINK-TRB on ISOC */
723	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
724			usb_endpoint_xfer_isoc(dep->desc))
725		return;
726
727	if (!req->trb) {
728		dwc3_gadget_move_request_queued(req);
729		req->trb = trb;
730		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
731	}
732
733	trb->size = DWC3_TRB_SIZE_LENGTH(length);
734	trb->bpl = lower_32_bits(dma);
735	trb->bph = upper_32_bits(dma);
736
737	switch (usb_endpoint_type(dep->desc)) {
738	case USB_ENDPOINT_XFER_CONTROL:
739		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
740		break;
741
742	case USB_ENDPOINT_XFER_ISOC:
743		trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
744
745		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
746		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
747			trb->ctrl |= DWC3_TRB_CTRL_IOC;
748		break;
749
750	case USB_ENDPOINT_XFER_BULK:
751	case USB_ENDPOINT_XFER_INT:
752		trb->ctrl = DWC3_TRBCTL_NORMAL;
753		break;
754	default:
755		/*
756		 * This is only possible with faulty memory because we
757		 * checked it already :)
758		 */
759		BUG();
760	}
761
762	if (usb_endpoint_xfer_isoc(dep->desc)) {
763		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
764		trb->ctrl |= DWC3_TRB_CTRL_CSP;
765	} else {
766		if (chain)
767			trb->ctrl |= DWC3_TRB_CTRL_CHN;
768
769		if (last)
770			trb->ctrl |= DWC3_TRB_CTRL_LST;
771	}
772
773	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
774		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
775
776	trb->ctrl |= DWC3_TRB_CTRL_HWO;
777}
778
779/*
780 * dwc3_prepare_trbs - setup TRBs from requests
781 * @dep: endpoint for which requests are being prepared
782 * @starting: true if the endpoint is idle and no requests are queued.
783 *
784 * The function goes through the requests list and sets up TRBs for the
785 * transfers. The function returns once there are no more TRBs available or
786 * it runs out of requests.
787 */
788static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
789{
790	struct dwc3_request	*req, *n;
791	u32			trbs_left;
792	u32			max;
793	unsigned int		last_one = 0;
794
795	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
796
797	/* the first request must not be queued */
798	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
799
800	/* Can't wrap around on a non-isoc EP since there's no link TRB */
801	if (!usb_endpoint_xfer_isoc(dep->desc)) {
802		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
803		if (trbs_left > max)
804			trbs_left = max;
805	}
806
807	/*
808	 * If busy & slot are equal than it is either full or empty. If we are
809	 * starting to process requests then we are empty. Otherwise we are
810	 * full and don't do anything
811	 */
812	if (!trbs_left) {
813		if (!starting)
814			return;
815		trbs_left = DWC3_TRB_NUM;
816		/*
817		 * In case we start from scratch, we queue the ISOC requests
818		 * starting from slot 1. This is done because we use ring
819		 * buffer and have no LST bit to stop us. Instead, we place
820		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
821		 * after the first request so we start at slot 1 and have
822		 * 7 requests proceed before we hit the first IOC.
823		 * Other transfer types don't use the ring buffer and are
824		 * processed from the first TRB until the last one. Since we
825		 * don't wrap around we have to start at the beginning.
826		 */
827		if (usb_endpoint_xfer_isoc(dep->desc)) {
828			dep->busy_slot = 1;
829			dep->free_slot = 1;
830		} else {
831			dep->busy_slot = 0;
832			dep->free_slot = 0;
833		}
834	}
835
836	/* The last TRB is a link TRB, not used for xfer */
837	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
838		return;
839
840	list_for_each_entry_safe(req, n, &dep->request_list, list) {
841		unsigned	length;
842		dma_addr_t	dma;
843
844		if (req->request.num_mapped_sgs > 0) {
845			struct usb_request *request = &req->request;
846			struct scatterlist *sg = request->sg;
847			struct scatterlist *s;
848			int		i;
849
850			for_each_sg(sg, s, request->num_mapped_sgs, i) {
851				unsigned chain = true;
852
853				length = sg_dma_len(s);
854				dma = sg_dma_address(s);
855
856				if (i == (request->num_mapped_sgs - 1) ||
857						sg_is_last(s)) {
858					last_one = true;
859					chain = false;
860				}
861
862				trbs_left--;
863				if (!trbs_left)
864					last_one = true;
865
866				if (last_one)
867					chain = false;
868
869				dwc3_prepare_one_trb(dep, req, dma, length,
870						last_one, chain);
871
872				if (last_one)
873					break;
874			}
875		} else {
876			dma = req->request.dma;
877			length = req->request.length;
878			trbs_left--;
879
880			if (!trbs_left)
881				last_one = 1;
882
883			/* Is this the last request? */
884			if (list_is_last(&req->list, &dep->request_list))
885				last_one = 1;
886
887			dwc3_prepare_one_trb(dep, req, dma, length,
888					last_one, false);
889
890			if (last_one)
891				break;
892		}
893	}
894}
895
896static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
897		int start_new)
898{
899	struct dwc3_gadget_ep_cmd_params params;
900	struct dwc3_request		*req;
901	struct dwc3			*dwc = dep->dwc;
902	int				ret;
903	u32				cmd;
904
905	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
906		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
907		return -EBUSY;
908	}
909	dep->flags &= ~DWC3_EP_PENDING_REQUEST;
910
911	/*
912	 * If we are getting here after a short-out-packet we don't enqueue any
913	 * new requests as we try to set the IOC bit only on the last request.
914	 */
915	if (start_new) {
916		if (list_empty(&dep->req_queued))
917			dwc3_prepare_trbs(dep, start_new);
918
919		/* req points to the first request which will be sent */
920		req = next_request(&dep->req_queued);
921	} else {
922		dwc3_prepare_trbs(dep, start_new);
923
924		/*
925		 * req points to the first request where HWO changed from 0 to 1
926		 */
927		req = next_request(&dep->req_queued);
928	}
929	if (!req) {
930		dep->flags |= DWC3_EP_PENDING_REQUEST;
931		return 0;
932	}
933
934	memset(&params, 0, sizeof(params));
935	params.param0 = upper_32_bits(req->trb_dma);
936	params.param1 = lower_32_bits(req->trb_dma);
937
938	if (start_new)
939		cmd = DWC3_DEPCMD_STARTTRANSFER;
940	else
941		cmd = DWC3_DEPCMD_UPDATETRANSFER;
942
943	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
944	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
945	if (ret < 0) {
946		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
947
948		/*
949		 * FIXME we need to iterate over the list of requests
950		 * here and stop, unmap, free and del each of the linked
951		 * requests instead of what we do now.
952		 */
953		usb_gadget_unmap_request(&dwc->gadget, &req->request,
954				req->direction);
955		list_del(&req->list);
956		return ret;
957	}
958
959	dep->flags |= DWC3_EP_BUSY;
960	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
961			dep->number);
962
963	WARN_ON_ONCE(!dep->res_trans_idx);
964
965	return 0;
966}
967
968static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
969{
970	struct dwc3		*dwc = dep->dwc;
971	int			ret;
972
973	req->request.actual	= 0;
974	req->request.status	= -EINPROGRESS;
975	req->direction		= dep->direction;
976	req->epnum		= dep->number;
977
978	/*
979	 * We only add to our list of requests now and
980	 * start consuming the list once we get XferNotReady
981	 * IRQ.
982	 *
983	 * That way, we avoid doing anything that we don't need
984	 * to do now and defer it until the point we receive a
985	 * particular token from the Host side.
986	 *
987	 * This will also avoid Host cancelling URBs due to too
988	 * many NAKs.
989	 */
990	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
991			dep->direction);
992	if (ret)
993		return ret;
994
995	list_add_tail(&req->list, &dep->request_list);
996
997	/*
998	 * There is one special case: XferNotReady with
999	 * empty list of requests. We need to kick the
1000	 * transfer here in that situation, otherwise
1001	 * we will be NAKing forever.
1002	 *
1003	 * If we get XferNotReady before gadget driver
1004	 * has a chance to queue a request, we will ACK
1005	 * the IRQ but won't be able to receive the data
1006	 * until the next request is queued. The following
1007	 * code is handling exactly that.
1008	 */
1009	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1010		int ret;
1011		int start_trans;
1012
1013		start_trans = 1;
1014		if (usb_endpoint_xfer_isoc(dep->desc) &&
1015				(dep->flags & DWC3_EP_BUSY))
1016			start_trans = 0;
1017
1018		ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
1019		if (ret && ret != -EBUSY) {
1020			struct dwc3	*dwc = dep->dwc;
1021
1022			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1023					dep->name);
1024		}
1025	};
1026
1027	return 0;
1028}
1029
1030static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1031	gfp_t gfp_flags)
1032{
1033	struct dwc3_request		*req = to_dwc3_request(request);
1034	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1035	struct dwc3			*dwc = dep->dwc;
1036
1037	unsigned long			flags;
1038
1039	int				ret;
1040
1041	if (!dep->desc) {
1042		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1043				request, ep->name);
1044		return -ESHUTDOWN;
1045	}
1046
1047	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1048			request, ep->name, request->length);
1049
1050	spin_lock_irqsave(&dwc->lock, flags);
1051	ret = __dwc3_gadget_ep_queue(dep, req);
1052	spin_unlock_irqrestore(&dwc->lock, flags);
1053
1054	return ret;
1055}
1056
1057static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1058		struct usb_request *request)
1059{
1060	struct dwc3_request		*req = to_dwc3_request(request);
1061	struct dwc3_request		*r = NULL;
1062
1063	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1064	struct dwc3			*dwc = dep->dwc;
1065
1066	unsigned long			flags;
1067	int				ret = 0;
1068
1069	spin_lock_irqsave(&dwc->lock, flags);
1070
1071	list_for_each_entry(r, &dep->request_list, list) {
1072		if (r == req)
1073			break;
1074	}
1075
1076	if (r != req) {
1077		list_for_each_entry(r, &dep->req_queued, list) {
1078			if (r == req)
1079				break;
1080		}
1081		if (r == req) {
1082			/* wait until it is processed */
1083			dwc3_stop_active_transfer(dwc, dep->number);
1084			goto out0;
1085		}
1086		dev_err(dwc->dev, "request %p was not queued to %s\n",
1087				request, ep->name);
1088		ret = -EINVAL;
1089		goto out0;
1090	}
1091
1092	/* giveback the request */
1093	dwc3_gadget_giveback(dep, req, -ECONNRESET);
1094
1095out0:
1096	spin_unlock_irqrestore(&dwc->lock, flags);
1097
1098	return ret;
1099}
1100
1101int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1102{
1103	struct dwc3_gadget_ep_cmd_params	params;
1104	struct dwc3				*dwc = dep->dwc;
1105	int					ret;
1106
1107	memset(&params, 0x00, sizeof(params));
1108
1109	if (value) {
1110		if (dep->number == 0 || dep->number == 1) {
1111			/*
1112			 * Whenever EP0 is stalled, we will restart
1113			 * the state machine, thus moving back to
1114			 * Setup Phase
1115			 */
1116			dwc->ep0state = EP0_SETUP_PHASE;
1117		}
1118
1119		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1120			DWC3_DEPCMD_SETSTALL, &params);
1121		if (ret)
1122			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1123					value ? "set" : "clear",
1124					dep->name);
1125		else
1126			dep->flags |= DWC3_EP_STALL;
1127	} else {
1128		if (dep->flags & DWC3_EP_WEDGE)
1129			return 0;
1130
1131		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1132			DWC3_DEPCMD_CLEARSTALL, &params);
1133		if (ret)
1134			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1135					value ? "set" : "clear",
1136					dep->name);
1137		else
1138			dep->flags &= ~DWC3_EP_STALL;
1139	}
1140
1141	return ret;
1142}
1143
1144static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1145{
1146	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1147	struct dwc3			*dwc = dep->dwc;
1148
1149	unsigned long			flags;
1150
1151	int				ret;
1152
1153	spin_lock_irqsave(&dwc->lock, flags);
1154
1155	if (usb_endpoint_xfer_isoc(dep->desc)) {
1156		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1157		ret = -EINVAL;
1158		goto out;
1159	}
1160
1161	ret = __dwc3_gadget_ep_set_halt(dep, value);
1162out:
1163	spin_unlock_irqrestore(&dwc->lock, flags);
1164
1165	return ret;
1166}
1167
1168static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1169{
1170	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1171	struct dwc3			*dwc = dep->dwc;
1172	unsigned long			flags;
1173
1174	spin_lock_irqsave(&dwc->lock, flags);
1175	dep->flags |= DWC3_EP_WEDGE;
1176	spin_unlock_irqrestore(&dwc->lock, flags);
1177
1178	return dwc3_gadget_ep_set_halt(ep, 1);
1179}
1180
1181/* -------------------------------------------------------------------------- */
1182
1183static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1184	.bLength	= USB_DT_ENDPOINT_SIZE,
1185	.bDescriptorType = USB_DT_ENDPOINT,
1186	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
1187};
1188
1189static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1190	.enable		= dwc3_gadget_ep0_enable,
1191	.disable	= dwc3_gadget_ep0_disable,
1192	.alloc_request	= dwc3_gadget_ep_alloc_request,
1193	.free_request	= dwc3_gadget_ep_free_request,
1194	.queue		= dwc3_gadget_ep0_queue,
1195	.dequeue	= dwc3_gadget_ep_dequeue,
1196	.set_halt	= dwc3_gadget_ep_set_halt,
1197	.set_wedge	= dwc3_gadget_ep_set_wedge,
1198};
1199
1200static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1201	.enable		= dwc3_gadget_ep_enable,
1202	.disable	= dwc3_gadget_ep_disable,
1203	.alloc_request	= dwc3_gadget_ep_alloc_request,
1204	.free_request	= dwc3_gadget_ep_free_request,
1205	.queue		= dwc3_gadget_ep_queue,
1206	.dequeue	= dwc3_gadget_ep_dequeue,
1207	.set_halt	= dwc3_gadget_ep_set_halt,
1208	.set_wedge	= dwc3_gadget_ep_set_wedge,
1209};
1210
1211/* -------------------------------------------------------------------------- */
1212
1213static int dwc3_gadget_get_frame(struct usb_gadget *g)
1214{
1215	struct dwc3		*dwc = gadget_to_dwc(g);
1216	u32			reg;
1217
1218	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1219	return DWC3_DSTS_SOFFN(reg);
1220}
1221
1222static int dwc3_gadget_wakeup(struct usb_gadget *g)
1223{
1224	struct dwc3		*dwc = gadget_to_dwc(g);
1225
1226	unsigned long		timeout;
1227	unsigned long		flags;
1228
1229	u32			reg;
1230
1231	int			ret = 0;
1232
1233	u8			link_state;
1234	u8			speed;
1235
1236	spin_lock_irqsave(&dwc->lock, flags);
1237
1238	/*
1239	 * According to the Databook Remote wakeup request should
1240	 * be issued only when the device is in early suspend state.
1241	 *
1242	 * We can check that via USB Link State bits in DSTS register.
1243	 */
1244	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1245
1246	speed = reg & DWC3_DSTS_CONNECTSPD;
1247	if (speed == DWC3_DSTS_SUPERSPEED) {
1248		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1249		ret = -EINVAL;
1250		goto out;
1251	}
1252
1253	link_state = DWC3_DSTS_USBLNKST(reg);
1254
1255	switch (link_state) {
1256	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1257	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1258		break;
1259	default:
1260		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1261				link_state);
1262		ret = -EINVAL;
1263		goto out;
1264	}
1265
1266	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1267	if (ret < 0) {
1268		dev_err(dwc->dev, "failed to put link in Recovery\n");
1269		goto out;
1270	}
1271
1272	/* write zeroes to Link Change Request */
1273	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1274	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1275
1276	/* poll until Link State changes to ON */
1277	timeout = jiffies + msecs_to_jiffies(100);
1278
1279	while (!time_after(jiffies, timeout)) {
1280		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1281
1282		/* in HS, means ON */
1283		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1284			break;
1285	}
1286
1287	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1288		dev_err(dwc->dev, "failed to send remote wakeup\n");
1289		ret = -EINVAL;
1290	}
1291
1292out:
1293	spin_unlock_irqrestore(&dwc->lock, flags);
1294
1295	return ret;
1296}
1297
1298static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1299		int is_selfpowered)
1300{
1301	struct dwc3		*dwc = gadget_to_dwc(g);
1302	unsigned long		flags;
1303
1304	spin_lock_irqsave(&dwc->lock, flags);
1305	dwc->is_selfpowered = !!is_selfpowered;
1306	spin_unlock_irqrestore(&dwc->lock, flags);
1307
1308	return 0;
1309}
1310
1311static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1312{
1313	u32			reg;
1314	u32			timeout = 500;
1315
1316	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1317	if (is_on) {
1318		reg &= ~DWC3_DCTL_TRGTULST_MASK;
1319		reg |= (DWC3_DCTL_RUN_STOP
1320				| DWC3_DCTL_TRGTULST_RX_DET);
1321	} else {
1322		reg &= ~DWC3_DCTL_RUN_STOP;
1323	}
1324
1325	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1326
1327	do {
1328		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1329		if (is_on) {
1330			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1331				break;
1332		} else {
1333			if (reg & DWC3_DSTS_DEVCTRLHLT)
1334				break;
1335		}
1336		timeout--;
1337		if (!timeout)
1338			break;
1339		udelay(1);
1340	} while (1);
1341
1342	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1343			dwc->gadget_driver
1344			? dwc->gadget_driver->function : "no-function",
1345			is_on ? "connect" : "disconnect");
1346}
1347
1348static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1349{
1350	struct dwc3		*dwc = gadget_to_dwc(g);
1351	unsigned long		flags;
1352
1353	is_on = !!is_on;
1354
1355	spin_lock_irqsave(&dwc->lock, flags);
1356	dwc3_gadget_run_stop(dwc, is_on);
1357	spin_unlock_irqrestore(&dwc->lock, flags);
1358
1359	return 0;
1360}
1361
1362static int dwc3_gadget_start(struct usb_gadget *g,
1363		struct usb_gadget_driver *driver)
1364{
1365	struct dwc3		*dwc = gadget_to_dwc(g);
1366	struct dwc3_ep		*dep;
1367	unsigned long		flags;
1368	int			ret = 0;
1369	u32			reg;
1370
1371	spin_lock_irqsave(&dwc->lock, flags);
1372
1373	if (dwc->gadget_driver) {
1374		dev_err(dwc->dev, "%s is already bound to %s\n",
1375				dwc->gadget.name,
1376				dwc->gadget_driver->driver.name);
1377		ret = -EBUSY;
1378		goto err0;
1379	}
1380
1381	dwc->gadget_driver	= driver;
1382	dwc->gadget.dev.driver	= &driver->driver;
1383
1384	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1385	reg &= ~(DWC3_DCFG_SPEED_MASK);
1386
1387	/**
1388	 * WORKAROUND: DWC3 revision < 2.20a have an issue
1389	 * which would cause metastability state on Run/Stop
1390	 * bit if we try to force the IP to USB2-only mode.
1391	 *
1392	 * Because of that, we cannot configure the IP to any
1393	 * speed other than the SuperSpeed
1394	 *
1395	 * Refers to:
1396	 *
1397	 * STAR#9000525659: Clock Domain Crossing on DCTL in
1398	 * USB 2.0 Mode
1399	 */
1400	if (dwc->revision < DWC3_REVISION_220A)
1401		reg |= DWC3_DCFG_SUPERSPEED;
1402	else
1403		reg |= dwc->maximum_speed;
1404	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1405
1406	dwc->start_config_issued = false;
1407
1408	/* Start with SuperSpeed Default */
1409	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1410
1411	dep = dwc->eps[0];
1412	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
1413	if (ret) {
1414		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1415		goto err0;
1416	}
1417
1418	dep = dwc->eps[1];
1419	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
1420	if (ret) {
1421		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1422		goto err1;
1423	}
1424
1425	/* begin to receive SETUP packets */
1426	dwc->ep0state = EP0_SETUP_PHASE;
1427	dwc3_ep0_out_start(dwc);
1428
1429	spin_unlock_irqrestore(&dwc->lock, flags);
1430
1431	return 0;
1432
1433err1:
1434	__dwc3_gadget_ep_disable(dwc->eps[0]);
1435
1436err0:
1437	spin_unlock_irqrestore(&dwc->lock, flags);
1438
1439	return ret;
1440}
1441
1442static int dwc3_gadget_stop(struct usb_gadget *g,
1443		struct usb_gadget_driver *driver)
1444{
1445	struct dwc3		*dwc = gadget_to_dwc(g);
1446	unsigned long		flags;
1447
1448	spin_lock_irqsave(&dwc->lock, flags);
1449
1450	__dwc3_gadget_ep_disable(dwc->eps[0]);
1451	__dwc3_gadget_ep_disable(dwc->eps[1]);
1452
1453	dwc->gadget_driver	= NULL;
1454	dwc->gadget.dev.driver	= NULL;
1455
1456	spin_unlock_irqrestore(&dwc->lock, flags);
1457
1458	return 0;
1459}
1460static const struct usb_gadget_ops dwc3_gadget_ops = {
1461	.get_frame		= dwc3_gadget_get_frame,
1462	.wakeup			= dwc3_gadget_wakeup,
1463	.set_selfpowered	= dwc3_gadget_set_selfpowered,
1464	.pullup			= dwc3_gadget_pullup,
1465	.udc_start		= dwc3_gadget_start,
1466	.udc_stop		= dwc3_gadget_stop,
1467};
1468
1469/* -------------------------------------------------------------------------- */
1470
1471static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1472{
1473	struct dwc3_ep			*dep;
1474	u8				epnum;
1475
1476	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1477
1478	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1479		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1480		if (!dep) {
1481			dev_err(dwc->dev, "can't allocate endpoint %d\n",
1482					epnum);
1483			return -ENOMEM;
1484		}
1485
1486		dep->dwc = dwc;
1487		dep->number = epnum;
1488		dwc->eps[epnum] = dep;
1489
1490		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1491				(epnum & 1) ? "in" : "out");
1492		dep->endpoint.name = dep->name;
1493		dep->direction = (epnum & 1);
1494
1495		if (epnum == 0 || epnum == 1) {
1496			dep->endpoint.maxpacket = 512;
1497			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1498			if (!epnum)
1499				dwc->gadget.ep0 = &dep->endpoint;
1500		} else {
1501			int		ret;
1502
1503			dep->endpoint.maxpacket = 1024;
1504			dep->endpoint.max_streams = 15;
1505			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1506			list_add_tail(&dep->endpoint.ep_list,
1507					&dwc->gadget.ep_list);
1508
1509			ret = dwc3_alloc_trb_pool(dep);
1510			if (ret)
1511				return ret;
1512		}
1513
1514		INIT_LIST_HEAD(&dep->request_list);
1515		INIT_LIST_HEAD(&dep->req_queued);
1516	}
1517
1518	return 0;
1519}
1520
1521static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1522{
1523	struct dwc3_ep			*dep;
1524	u8				epnum;
1525
1526	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1527		dep = dwc->eps[epnum];
1528		dwc3_free_trb_pool(dep);
1529
1530		if (epnum != 0 && epnum != 1)
1531			list_del(&dep->endpoint.ep_list);
1532
1533		kfree(dep);
1534	}
1535}
1536
1537static void dwc3_gadget_release(struct device *dev)
1538{
1539	dev_dbg(dev, "%s\n", __func__);
1540}
1541
1542/* -------------------------------------------------------------------------- */
1543static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1544		const struct dwc3_event_depevt *event, int status)
1545{
1546	struct dwc3_request	*req;
1547	struct dwc3_trb		*trb;
1548	unsigned int		count;
1549	unsigned int		s_pkt = 0;
1550
1551	do {
1552		req = next_request(&dep->req_queued);
1553		if (!req) {
1554			WARN_ON_ONCE(1);
1555			return 1;
1556		}
1557
1558		trb = req->trb;
1559
1560		if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1561			/*
1562			 * We continue despite the error. There is not much we
1563			 * can do. If we don't clean it up we loop forever. If
1564			 * we skip the TRB then it gets overwritten after a
1565			 * while since we use them in a ring buffer. A BUG()
1566			 * would help. Lets hope that if this occurs, someone
1567			 * fixes the root cause instead of looking away :)
1568			 */
1569			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1570					dep->name, req->trb);
1571		count = trb->size & DWC3_TRB_SIZE_MASK;
1572
1573		if (dep->direction) {
1574			if (count) {
1575				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1576						dep->name);
1577				status = -ECONNRESET;
1578			}
1579		} else {
1580			if (count && (event->status & DEPEVT_STATUS_SHORT))
1581				s_pkt = 1;
1582		}
1583
1584		/*
1585		 * We assume here we will always receive the entire data block
1586		 * which we should receive. Meaning, if we program RX to
1587		 * receive 4K but we receive only 2K, we assume that's all we
1588		 * should receive and we simply bounce the request back to the
1589		 * gadget driver for further processing.
1590		 */
1591		req->request.actual += req->request.length - count;
1592		dwc3_gadget_giveback(dep, req, status);
1593		if (s_pkt)
1594			break;
1595		if ((event->status & DEPEVT_STATUS_LST) &&
1596				(trb->ctrl & DWC3_TRB_CTRL_LST))
1597			break;
1598		if ((event->status & DEPEVT_STATUS_IOC) &&
1599				(trb->ctrl & DWC3_TRB_CTRL_IOC))
1600			break;
1601	} while (1);
1602
1603	if ((event->status & DEPEVT_STATUS_IOC) &&
1604			(trb->ctrl & DWC3_TRB_CTRL_IOC))
1605		return 0;
1606	return 1;
1607}
1608
1609static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1610		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1611		int start_new)
1612{
1613	unsigned		status = 0;
1614	int			clean_busy;
1615
1616	if (event->status & DEPEVT_STATUS_BUSERR)
1617		status = -ECONNRESET;
1618
1619	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1620	if (clean_busy)
1621		dep->flags &= ~DWC3_EP_BUSY;
1622
1623	/*
1624	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1625	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1626	 */
1627	if (dwc->revision < DWC3_REVISION_183A) {
1628		u32		reg;
1629		int		i;
1630
1631		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1632			struct dwc3_ep	*dep = dwc->eps[i];
1633
1634			if (!(dep->flags & DWC3_EP_ENABLED))
1635				continue;
1636
1637			if (!list_empty(&dep->req_queued))
1638				return;
1639		}
1640
1641		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1642		reg |= dwc->u1u2;
1643		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1644
1645		dwc->u1u2 = 0;
1646	}
1647}
1648
1649static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1650		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1651{
1652	u32 uf, mask;
1653
1654	if (list_empty(&dep->request_list)) {
1655		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1656			dep->name);
1657		return;
1658	}
1659
1660	mask = ~(dep->interval - 1);
1661	uf = event->parameters & mask;
1662	/* 4 micro frames in the future */
1663	uf += dep->interval * 4;
1664
1665	__dwc3_gadget_kick_transfer(dep, uf, 1);
1666}
1667
1668static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
1669		const struct dwc3_event_depevt *event)
1670{
1671	struct dwc3 *dwc = dep->dwc;
1672	struct dwc3_event_depevt mod_ev = *event;
1673
1674	/*
1675	 * We were asked to remove one request. It is possible that this
1676	 * request and a few others were started together and have the same
1677	 * transfer index. Since we stopped the complete endpoint we don't
1678	 * know how many requests were already completed (and not yet)
1679	 * reported and how could be done (later). We purge them all until
1680	 * the end of the list.
1681	 */
1682	mod_ev.status = DEPEVT_STATUS_LST;
1683	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
1684	dep->flags &= ~DWC3_EP_BUSY;
1685	/* pending requests are ignored and are queued on XferNotReady */
1686}
1687
1688static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1689		const struct dwc3_event_depevt *event)
1690{
1691	u32 param = event->parameters;
1692	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1693
1694	switch (cmd_type) {
1695	case DWC3_DEPCMD_ENDTRANSFER:
1696		dwc3_process_ep_cmd_complete(dep, event);
1697		break;
1698	case DWC3_DEPCMD_STARTTRANSFER:
1699		dep->res_trans_idx = param & 0x7f;
1700		break;
1701	default:
1702		printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
1703				__func__, cmd_type);
1704		break;
1705	};
1706}
1707
1708static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1709		const struct dwc3_event_depevt *event)
1710{
1711	struct dwc3_ep		*dep;
1712	u8			epnum = event->endpoint_number;
1713
1714	dep = dwc->eps[epnum];
1715
1716	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1717			dwc3_ep_event_string(event->endpoint_event));
1718
1719	if (epnum == 0 || epnum == 1) {
1720		dwc3_ep0_interrupt(dwc, event);
1721		return;
1722	}
1723
1724	switch (event->endpoint_event) {
1725	case DWC3_DEPEVT_XFERCOMPLETE:
1726		dep->res_trans_idx = 0;
1727
1728		if (usb_endpoint_xfer_isoc(dep->desc)) {
1729			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1730					dep->name);
1731			return;
1732		}
1733
1734		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1735		break;
1736	case DWC3_DEPEVT_XFERINPROGRESS:
1737		if (!usb_endpoint_xfer_isoc(dep->desc)) {
1738			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1739					dep->name);
1740			return;
1741		}
1742
1743		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1744		break;
1745	case DWC3_DEPEVT_XFERNOTREADY:
1746		if (usb_endpoint_xfer_isoc(dep->desc)) {
1747			dwc3_gadget_start_isoc(dwc, dep, event);
1748		} else {
1749			int ret;
1750
1751			dev_vdbg(dwc->dev, "%s: reason %s\n",
1752					dep->name, event->status &
1753					DEPEVT_STATUS_TRANSFER_ACTIVE
1754					? "Transfer Active"
1755					: "Transfer Not Active");
1756
1757			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1758			if (!ret || ret == -EBUSY)
1759				return;
1760
1761			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1762					dep->name);
1763		}
1764
1765		break;
1766	case DWC3_DEPEVT_STREAMEVT:
1767		if (!usb_endpoint_xfer_bulk(dep->desc)) {
1768			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1769					dep->name);
1770			return;
1771		}
1772
1773		switch (event->status) {
1774		case DEPEVT_STREAMEVT_FOUND:
1775			dev_vdbg(dwc->dev, "Stream %d found and started\n",
1776					event->parameters);
1777
1778			break;
1779		case DEPEVT_STREAMEVT_NOTFOUND:
1780			/* FALLTHROUGH */
1781		default:
1782			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1783		}
1784		break;
1785	case DWC3_DEPEVT_RXTXFIFOEVT:
1786		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1787		break;
1788	case DWC3_DEPEVT_EPCMDCMPLT:
1789		dwc3_ep_cmd_compl(dep, event);
1790		break;
1791	}
1792}
1793
1794static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1795{
1796	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1797		spin_unlock(&dwc->lock);
1798		dwc->gadget_driver->disconnect(&dwc->gadget);
1799		spin_lock(&dwc->lock);
1800	}
1801}
1802
1803static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1804{
1805	struct dwc3_ep *dep;
1806	struct dwc3_gadget_ep_cmd_params params;
1807	u32 cmd;
1808	int ret;
1809
1810	dep = dwc->eps[epnum];
1811
1812	WARN_ON(!dep->res_trans_idx);
1813	if (dep->res_trans_idx) {
1814		cmd = DWC3_DEPCMD_ENDTRANSFER;
1815		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1816		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1817		memset(&params, 0, sizeof(params));
1818		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1819		WARN_ON_ONCE(ret);
1820		dep->res_trans_idx = 0;
1821	}
1822}
1823
1824static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1825{
1826	u32 epnum;
1827
1828	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1829		struct dwc3_ep *dep;
1830
1831		dep = dwc->eps[epnum];
1832		if (!(dep->flags & DWC3_EP_ENABLED))
1833			continue;
1834
1835		dwc3_remove_requests(dwc, dep);
1836	}
1837}
1838
1839static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1840{
1841	u32 epnum;
1842
1843	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1844		struct dwc3_ep *dep;
1845		struct dwc3_gadget_ep_cmd_params params;
1846		int ret;
1847
1848		dep = dwc->eps[epnum];
1849
1850		if (!(dep->flags & DWC3_EP_STALL))
1851			continue;
1852
1853		dep->flags &= ~DWC3_EP_STALL;
1854
1855		memset(&params, 0, sizeof(params));
1856		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1857				DWC3_DEPCMD_CLEARSTALL, &params);
1858		WARN_ON_ONCE(ret);
1859	}
1860}
1861
1862static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1863{
1864	dev_vdbg(dwc->dev, "%s\n", __func__);
1865#if 0
1866	XXX
1867	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
1868	enable it before we can disable it.
1869
1870	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1871	reg &= ~DWC3_DCTL_INITU1ENA;
1872	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1873
1874	reg &= ~DWC3_DCTL_INITU2ENA;
1875	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1876#endif
1877
1878	dwc3_stop_active_transfers(dwc);
1879	dwc3_disconnect_gadget(dwc);
1880	dwc->start_config_issued = false;
1881
1882	dwc->gadget.speed = USB_SPEED_UNKNOWN;
1883	dwc->setup_packet_pending = false;
1884}
1885
1886static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1887{
1888	u32			reg;
1889
1890	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1891
1892	if (on)
1893		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1894	else
1895		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1896
1897	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1898}
1899
1900static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1901{
1902	u32			reg;
1903
1904	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1905
1906	if (on)
1907		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1908	else
1909		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1910
1911	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1912}
1913
1914static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
1915{
1916	u32			reg;
1917
1918	dev_vdbg(dwc->dev, "%s\n", __func__);
1919
1920	/*
1921	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
1922	 * would cause a missing Disconnect Event if there's a
1923	 * pending Setup Packet in the FIFO.
1924	 *
1925	 * There's no suggested workaround on the official Bug
1926	 * report, which states that "unless the driver/application
1927	 * is doing any special handling of a disconnect event,
1928	 * there is no functional issue".
1929	 *
1930	 * Unfortunately, it turns out that we _do_ some special
1931	 * handling of a disconnect event, namely complete all
1932	 * pending transfers, notify gadget driver of the
1933	 * disconnection, and so on.
1934	 *
1935	 * Our suggested workaround is to follow the Disconnect
1936	 * Event steps here, instead, based on a setup_packet_pending
1937	 * flag. Such flag gets set whenever we have a XferNotReady
1938	 * event on EP0 and gets cleared on XferComplete for the
1939	 * same endpoint.
1940	 *
1941	 * Refers to:
1942	 *
1943	 * STAR#9000466709: RTL: Device : Disconnect event not
1944	 * generated if setup packet pending in FIFO
1945	 */
1946	if (dwc->revision < DWC3_REVISION_188A) {
1947		if (dwc->setup_packet_pending)
1948			dwc3_gadget_disconnect_interrupt(dwc);
1949	}
1950
1951	/* after reset -> Default State */
1952	dwc->dev_state = DWC3_DEFAULT_STATE;
1953
1954	/* Enable PHYs */
1955	dwc3_gadget_usb2_phy_power(dwc, true);
1956	dwc3_gadget_usb3_phy_power(dwc, true);
1957
1958	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
1959		dwc3_disconnect_gadget(dwc);
1960
1961	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1962	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
1963	reg &= ~(DWC3_DCTL_INITU1ENA | DWC3_DCTL_INITU2ENA);
1964	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1965	dwc->test_mode = false;
1966
1967	dwc3_stop_active_transfers(dwc);
1968	dwc3_clear_stall_all_ep(dwc);
1969	dwc->start_config_issued = false;
1970
1971	/* Reset device address to zero */
1972	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1973	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
1974	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1975}
1976
1977static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1978{
1979	u32 reg;
1980	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1981
1982	/*
1983	 * We change the clock only at SS but I dunno why I would want to do
1984	 * this. Maybe it becomes part of the power saving plan.
1985	 */
1986
1987	if (speed != DWC3_DSTS_SUPERSPEED)
1988		return;
1989
1990	/*
1991	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
1992	 * each time on Connect Done.
1993	 */
1994	if (!usb30_clock)
1995		return;
1996
1997	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1998	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
1999	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2000}
2001
2002static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
2003{
2004	switch (speed) {
2005	case USB_SPEED_SUPER:
2006		dwc3_gadget_usb2_phy_power(dwc, false);
2007		break;
2008	case USB_SPEED_HIGH:
2009	case USB_SPEED_FULL:
2010	case USB_SPEED_LOW:
2011		dwc3_gadget_usb3_phy_power(dwc, false);
2012		break;
2013	}
2014}
2015
2016static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2017{
2018	struct dwc3_gadget_ep_cmd_params params;
2019	struct dwc3_ep		*dep;
2020	int			ret;
2021	u32			reg;
2022	u8			speed;
2023
2024	dev_vdbg(dwc->dev, "%s\n", __func__);
2025
2026	memset(&params, 0x00, sizeof(params));
2027
2028	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2029	speed = reg & DWC3_DSTS_CONNECTSPD;
2030	dwc->speed = speed;
2031
2032	dwc3_update_ram_clk_sel(dwc, speed);
2033
2034	switch (speed) {
2035	case DWC3_DCFG_SUPERSPEED:
2036		/*
2037		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2038		 * would cause a missing USB3 Reset event.
2039		 *
2040		 * In such situations, we should force a USB3 Reset
2041		 * event by calling our dwc3_gadget_reset_interrupt()
2042		 * routine.
2043		 *
2044		 * Refers to:
2045		 *
2046		 * STAR#9000483510: RTL: SS : USB3 reset event may
2047		 * not be generated always when the link enters poll
2048		 */
2049		if (dwc->revision < DWC3_REVISION_190A)
2050			dwc3_gadget_reset_interrupt(dwc);
2051
2052		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2053		dwc->gadget.ep0->maxpacket = 512;
2054		dwc->gadget.speed = USB_SPEED_SUPER;
2055		break;
2056	case DWC3_DCFG_HIGHSPEED:
2057		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2058		dwc->gadget.ep0->maxpacket = 64;
2059		dwc->gadget.speed = USB_SPEED_HIGH;
2060		break;
2061	case DWC3_DCFG_FULLSPEED2:
2062	case DWC3_DCFG_FULLSPEED1:
2063		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2064		dwc->gadget.ep0->maxpacket = 64;
2065		dwc->gadget.speed = USB_SPEED_FULL;
2066		break;
2067	case DWC3_DCFG_LOWSPEED:
2068		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2069		dwc->gadget.ep0->maxpacket = 8;
2070		dwc->gadget.speed = USB_SPEED_LOW;
2071		break;
2072	}
2073
2074	/* Disable unneded PHY */
2075	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
2076
2077	dep = dwc->eps[0];
2078	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2079	if (ret) {
2080		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2081		return;
2082	}
2083
2084	dep = dwc->eps[1];
2085	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2086	if (ret) {
2087		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2088		return;
2089	}
2090
2091	/*
2092	 * Configure PHY via GUSB3PIPECTLn if required.
2093	 *
2094	 * Update GTXFIFOSIZn
2095	 *
2096	 * In both cases reset values should be sufficient.
2097	 */
2098}
2099
2100static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2101{
2102	dev_vdbg(dwc->dev, "%s\n", __func__);
2103
2104	/*
2105	 * TODO take core out of low power mode when that's
2106	 * implemented.
2107	 */
2108
2109	dwc->gadget_driver->resume(&dwc->gadget);
2110}
2111
2112static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2113		unsigned int evtinfo)
2114{
2115	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
2116
2117	/*
2118	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2119	 * on the link partner, the USB session might do multiple entry/exit
2120	 * of low power states before a transfer takes place.
2121	 *
2122	 * Due to this problem, we might experience lower throughput. The
2123	 * suggested workaround is to disable DCTL[12:9] bits if we're
2124	 * transitioning from U1/U2 to U0 and enable those bits again
2125	 * after a transfer completes and there are no pending transfers
2126	 * on any of the enabled endpoints.
2127	 *
2128	 * This is the first half of that workaround.
2129	 *
2130	 * Refers to:
2131	 *
2132	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2133	 * core send LGO_Ux entering U0
2134	 */
2135	if (dwc->revision < DWC3_REVISION_183A) {
2136		if (next == DWC3_LINK_STATE_U0) {
2137			u32	u1u2;
2138			u32	reg;
2139
2140			switch (dwc->link_state) {
2141			case DWC3_LINK_STATE_U1:
2142			case DWC3_LINK_STATE_U2:
2143				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2144				u1u2 = reg & (DWC3_DCTL_INITU2ENA
2145						| DWC3_DCTL_ACCEPTU2ENA
2146						| DWC3_DCTL_INITU1ENA
2147						| DWC3_DCTL_ACCEPTU1ENA);
2148
2149				if (!dwc->u1u2)
2150					dwc->u1u2 = reg & u1u2;
2151
2152				reg &= ~u1u2;
2153
2154				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2155				break;
2156			default:
2157				/* do nothing */
2158				break;
2159			}
2160		}
2161	}
2162
2163	dwc->link_state = next;
2164
2165	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
2166}
2167
2168static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2169		const struct dwc3_event_devt *event)
2170{
2171	switch (event->type) {
2172	case DWC3_DEVICE_EVENT_DISCONNECT:
2173		dwc3_gadget_disconnect_interrupt(dwc);
2174		break;
2175	case DWC3_DEVICE_EVENT_RESET:
2176		dwc3_gadget_reset_interrupt(dwc);
2177		break;
2178	case DWC3_DEVICE_EVENT_CONNECT_DONE:
2179		dwc3_gadget_conndone_interrupt(dwc);
2180		break;
2181	case DWC3_DEVICE_EVENT_WAKEUP:
2182		dwc3_gadget_wakeup_interrupt(dwc);
2183		break;
2184	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2185		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2186		break;
2187	case DWC3_DEVICE_EVENT_EOPF:
2188		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2189		break;
2190	case DWC3_DEVICE_EVENT_SOF:
2191		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2192		break;
2193	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2194		dev_vdbg(dwc->dev, "Erratic Error\n");
2195		break;
2196	case DWC3_DEVICE_EVENT_CMD_CMPL:
2197		dev_vdbg(dwc->dev, "Command Complete\n");
2198		break;
2199	case DWC3_DEVICE_EVENT_OVERFLOW:
2200		dev_vdbg(dwc->dev, "Overflow\n");
2201		break;
2202	default:
2203		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2204	}
2205}
2206
2207static void dwc3_process_event_entry(struct dwc3 *dwc,
2208		const union dwc3_event *event)
2209{
2210	/* Endpoint IRQ, handle it and return early */
2211	if (event->type.is_devspec == 0) {
2212		/* depevt */
2213		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2214	}
2215
2216	switch (event->type.type) {
2217	case DWC3_EVENT_TYPE_DEV:
2218		dwc3_gadget_interrupt(dwc, &event->devt);
2219		break;
2220	/* REVISIT what to do with Carkit and I2C events ? */
2221	default:
2222		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2223	}
2224}
2225
2226static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2227{
2228	struct dwc3_event_buffer *evt;
2229	int left;
2230	u32 count;
2231
2232	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2233	count &= DWC3_GEVNTCOUNT_MASK;
2234	if (!count)
2235		return IRQ_NONE;
2236
2237	evt = dwc->ev_buffs[buf];
2238	left = count;
2239
2240	while (left > 0) {
2241		union dwc3_event event;
2242
2243		event.raw = *(u32 *) (evt->buf + evt->lpos);
2244
2245		dwc3_process_event_entry(dwc, &event);
2246		/*
2247		 * XXX we wrap around correctly to the next entry as almost all
2248		 * entries are 4 bytes in size. There is one entry which has 12
2249		 * bytes which is a regular entry followed by 8 bytes data. ATM
2250		 * I don't know how things are organized if were get next to the
2251		 * a boundary so I worry about that once we try to handle that.
2252		 */
2253		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2254		left -= 4;
2255
2256		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2257	}
2258
2259	return IRQ_HANDLED;
2260}
2261
2262static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2263{
2264	struct dwc3			*dwc = _dwc;
2265	int				i;
2266	irqreturn_t			ret = IRQ_NONE;
2267
2268	spin_lock(&dwc->lock);
2269
2270	for (i = 0; i < dwc->num_event_buffers; i++) {
2271		irqreturn_t status;
2272
2273		status = dwc3_process_event_buf(dwc, i);
2274		if (status == IRQ_HANDLED)
2275			ret = status;
2276	}
2277
2278	spin_unlock(&dwc->lock);
2279
2280	return ret;
2281}
2282
2283/**
2284 * dwc3_gadget_init - Initializes gadget related registers
2285 * @dwc: pointer to our controller context structure
2286 *
2287 * Returns 0 on success otherwise negative errno.
2288 */
2289int __devinit dwc3_gadget_init(struct dwc3 *dwc)
2290{
2291	u32					reg;
2292	int					ret;
2293	int					irq;
2294
2295	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2296			&dwc->ctrl_req_addr, GFP_KERNEL);
2297	if (!dwc->ctrl_req) {
2298		dev_err(dwc->dev, "failed to allocate ctrl request\n");
2299		ret = -ENOMEM;
2300		goto err0;
2301	}
2302
2303	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2304			&dwc->ep0_trb_addr, GFP_KERNEL);
2305	if (!dwc->ep0_trb) {
2306		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2307		ret = -ENOMEM;
2308		goto err1;
2309	}
2310
2311	dwc->setup_buf = kzalloc(sizeof(*dwc->setup_buf) * 2,
2312			GFP_KERNEL);
2313	if (!dwc->setup_buf) {
2314		dev_err(dwc->dev, "failed to allocate setup buffer\n");
2315		ret = -ENOMEM;
2316		goto err2;
2317	}
2318
2319	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2320			512, &dwc->ep0_bounce_addr, GFP_KERNEL);
2321	if (!dwc->ep0_bounce) {
2322		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2323		ret = -ENOMEM;
2324		goto err3;
2325	}
2326
2327	dev_set_name(&dwc->gadget.dev, "gadget");
2328
2329	dwc->gadget.ops			= &dwc3_gadget_ops;
2330	dwc->gadget.max_speed		= USB_SPEED_SUPER;
2331	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2332	dwc->gadget.dev.parent		= dwc->dev;
2333	dwc->gadget.sg_supported	= true;
2334
2335	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2336
2337	dwc->gadget.dev.dma_parms	= dwc->dev->dma_parms;
2338	dwc->gadget.dev.dma_mask	= dwc->dev->dma_mask;
2339	dwc->gadget.dev.release		= dwc3_gadget_release;
2340	dwc->gadget.name		= "dwc3-gadget";
2341
2342	/*
2343	 * REVISIT: Here we should clear all pending IRQs to be
2344	 * sure we're starting from a well known location.
2345	 */
2346
2347	ret = dwc3_gadget_init_endpoints(dwc);
2348	if (ret)
2349		goto err4;
2350
2351	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2352
2353	ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2354			"dwc3", dwc);
2355	if (ret) {
2356		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2357				irq, ret);
2358		goto err5;
2359	}
2360
2361	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2362	reg |= DWC3_DCFG_LPM_CAP;
2363	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2364
2365	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2366	reg |= DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA;
2367	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2368
2369	/* Enable all but Start and End of Frame IRQs */
2370	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2371			DWC3_DEVTEN_EVNTOVERFLOWEN |
2372			DWC3_DEVTEN_CMDCMPLTEN |
2373			DWC3_DEVTEN_ERRTICERREN |
2374			DWC3_DEVTEN_WKUPEVTEN |
2375			DWC3_DEVTEN_ULSTCNGEN |
2376			DWC3_DEVTEN_CONNECTDONEEN |
2377			DWC3_DEVTEN_USBRSTEN |
2378			DWC3_DEVTEN_DISCONNEVTEN);
2379	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2380
2381	ret = device_register(&dwc->gadget.dev);
2382	if (ret) {
2383		dev_err(dwc->dev, "failed to register gadget device\n");
2384		put_device(&dwc->gadget.dev);
2385		goto err6;
2386	}
2387
2388	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2389	if (ret) {
2390		dev_err(dwc->dev, "failed to register udc\n");
2391		goto err7;
2392	}
2393
2394	return 0;
2395
2396err7:
2397	device_unregister(&dwc->gadget.dev);
2398
2399err6:
2400	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2401	free_irq(irq, dwc);
2402
2403err5:
2404	dwc3_gadget_free_endpoints(dwc);
2405
2406err4:
2407	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2408			dwc->ep0_bounce_addr);
2409
2410err3:
2411	kfree(dwc->setup_buf);
2412
2413err2:
2414	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2415			dwc->ep0_trb, dwc->ep0_trb_addr);
2416
2417err1:
2418	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2419			dwc->ctrl_req, dwc->ctrl_req_addr);
2420
2421err0:
2422	return ret;
2423}
2424
2425void dwc3_gadget_exit(struct dwc3 *dwc)
2426{
2427	int			irq;
2428
2429	usb_del_gadget_udc(&dwc->gadget);
2430	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2431
2432	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2433	free_irq(irq, dwc);
2434
2435	dwc3_gadget_free_endpoints(dwc);
2436
2437	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2438			dwc->ep0_bounce_addr);
2439
2440	kfree(dwc->setup_buf);
2441
2442	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2443			dwc->ep0_trb, dwc->ep0_trb_addr);
2444
2445	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2446			dwc->ctrl_req, dwc->ctrl_req_addr);
2447
2448	device_unregister(&dwc->gadget.dev);
2449}
2450