gadget.c revision 27a78d6a283d6782438f72306746afe4bf44c215
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 *    to endorse or promote products derived from this software without
20 *    specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <linux/kernel.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/interrupt.h>
46#include <linux/io.h>
47#include <linux/list.h>
48#include <linux/dma-mapping.h>
49
50#include <linux/usb/ch9.h>
51#include <linux/usb/gadget.h>
52
53#include "core.h"
54#include "gadget.h"
55#include "io.h"
56
57#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
58
59/**
60 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
61 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Test Packet, Force Enable)
63 *
64 * Caller should take care of locking. This function will
65 * return 0 on success or -EINVAL if wrong Test Selector
66 * is passed
67 */
68int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
69{
70	u32		reg;
71
72	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
73	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
74
75	switch (mode) {
76	case TEST_J:
77	case TEST_K:
78	case TEST_SE0_NAK:
79	case TEST_PACKET:
80	case TEST_FORCE_EN:
81		reg |= mode << 1;
82		break;
83	default:
84		return -EINVAL;
85	}
86
87	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
88
89	return 0;
90}
91
92/**
93 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
94 * @dwc: pointer to our context structure
95 * @state: the state to put link into
96 *
97 * Caller should take care of locking. This function will
98 * return 0 on success or -EINVAL.
99 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 100;
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * wait for a change in DSTS; busy-wait with udelay() since the
	 * caller may hold dwc->lock and we cannot sleep here
	 */
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(500);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}
127
128/**
129 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
130 * @dwc: pointer to our context structure
131 *
 * This function will do a best-effort FIFO allocation in order
133 * to improve FIFO usage and throughput, while still allowing
134 * us to enable as many endpoints as possible.
135 *
136 * Keep in mind that this operation will be highly dependent
137 * on the configured size for RAM1 - which contains TxFifo -,
138 * the amount of endpoints enabled on coreConsultant tool, and
139 * the width of the Master Bus.
140 *
141 * In the ideal world, we would always be able to satisfy the
142 * following equation:
143 *
144 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
145 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
146 *
147 * Unfortunately, due to many variables that's not always the case.
148 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;
	int		ram1_depth;
	int		fifo_size;
	int		mdwidth;
	int		num;

	/* nothing to do unless the platform asked for a FIFO resize */
	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * NOTE(review): ram1_depth is read but never used below, so the
	 * allocation does not appear to be bounded against RAM1 -- confirm.
	 *
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep	*dep = dwc->eps[num];
		int		fifo_number = dep->number >> 1;
		int		mult = 1;
		int		tmp;

		/* TX FIFOs exist only for IN endpoints (odd numbers here) */
		if (!(dep->number & 1))
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		/* bulk and isochronous endpoints get room for 3 packets */
		if (usb_endpoint_xfer_bulk(dep->desc)
				|| usb_endpoint_xfer_isoc(dep->desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		/* FIFO size is expressed in units of MDWIDTH bytes */
		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		/* upper 16 bits of GTXFIFOSIZ hold the FIFO start address */
		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}
217
218void dwc3_map_buffer_to_dma(struct dwc3_request *req)
219{
220	struct dwc3			*dwc = req->dep->dwc;
221
222	if (req->request.length == 0) {
223		/* req->request.dma = dwc->setup_buf_addr; */
224		return;
225	}
226
227	if (req->request.num_sgs) {
228		int	mapped;
229
230		mapped = dma_map_sg(dwc->dev, req->request.sg,
231				req->request.num_sgs,
232				req->direction ? DMA_TO_DEVICE
233				: DMA_FROM_DEVICE);
234		if (mapped < 0) {
235			dev_err(dwc->dev, "failed to map SGs\n");
236			return;
237		}
238
239		req->request.num_mapped_sgs = mapped;
240		return;
241	}
242
243	if (req->request.dma == DMA_ADDR_INVALID) {
244		req->request.dma = dma_map_single(dwc->dev, req->request.buf,
245				req->request.length, req->direction
246				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
247		req->mapped = true;
248	}
249}
250
251void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
252{
253	struct dwc3			*dwc = req->dep->dwc;
254
255	if (req->request.length == 0) {
256		req->request.dma = DMA_ADDR_INVALID;
257		return;
258	}
259
260	if (req->request.num_mapped_sgs) {
261		req->request.dma = DMA_ADDR_INVALID;
262		dma_unmap_sg(dwc->dev, req->request.sg,
263				req->request.num_mapped_sgs,
264				req->direction ? DMA_TO_DEVICE
265				: DMA_FROM_DEVICE);
266
267		req->request.num_mapped_sgs = 0;
268		return;
269	}
270
271	if (req->mapped) {
272		dma_unmap_single(dwc->dev, req->request.dma,
273				req->request.length, req->direction
274				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
275		req->mapped = 0;
276		req->request.dma = DMA_ADDR_INVALID;
277	}
278}
279
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;

	/* advance ring accounting only if the request reached the HW ring */
	if (req->queued) {
		if (req->request.num_mapped_sgs)
			dep->busy_slot += req->request.num_mapped_sgs;
		else
			dep->busy_slot++;

		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			dep->busy_slot++;
	}
	list_del(&req->list);
	req->trb = NULL;

	/* don't overwrite a status already set (e.g. -ESHUTDOWN) */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	dwc3_unmap_buffer_from_dma(req);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	/*
	 * drop the lock around the completion callback: gadget drivers
	 * may queue new requests from within their ->complete()
	 */
	spin_unlock(&dwc->lock);
	req->request.complete(&req->dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
316
317static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
318{
319	switch (cmd) {
320	case DWC3_DEPCMD_DEPSTARTCFG:
321		return "Start New Configuration";
322	case DWC3_DEPCMD_ENDTRANSFER:
323		return "End Transfer";
324	case DWC3_DEPCMD_UPDATETRANSFER:
325		return "Update Transfer";
326	case DWC3_DEPCMD_STARTTRANSFER:
327		return "Start Transfer";
328	case DWC3_DEPCMD_CLEARSTALL:
329		return "Clear Stall";
330	case DWC3_DEPCMD_SETSTALL:
331		return "Set Stall";
332	case DWC3_DEPCMD_GETSEQNUMBER:
333		return "Get Data Sequence Number";
334	case DWC3_DEPCMD_SETTRANSFRESOURCE:
335		return "Set Endpoint Transfer Resource";
336	case DWC3_DEPCMD_SETEPCONFIG:
337		return "Set Endpoint Configuration";
338	default:
339		return "UNKNOWN command";
340	}
341}
342
int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep		*dep = dwc->eps[ep];
	u32			timeout = 500;
	u32			reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	/* write the three command parameters before triggering the command */
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	/* CMDACT kicks the command; hardware clears it on completion */
	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}
379
380static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
381		struct dwc3_trb *trb)
382{
383	u32		offset = (char *) trb - (char *) dep->trb_pool;
384
385	return dep->trb_pool_dma + offset;
386}
387
388static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
389{
390	struct dwc3		*dwc = dep->dwc;
391
392	if (dep->trb_pool)
393		return 0;
394
395	if (dep->number == 0 || dep->number == 1)
396		return 0;
397
398	dep->trb_pool = dma_alloc_coherent(dwc->dev,
399			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
400			&dep->trb_pool_dma, GFP_KERNEL);
401	if (!dep->trb_pool) {
402		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
403				dep->name);
404		return -ENOMEM;
405	}
406
407	return 0;
408}
409
410static void dwc3_free_trb_pool(struct dwc3_ep *dep)
411{
412	struct dwc3		*dwc = dep->dwc;
413
414	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
415			dep->trb_pool, dep->trb_pool_dma);
416
417	dep->trb_pool = NULL;
418	dep->trb_pool_dma = 0;
419}
420
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;

	memset(&params, 0x00, sizeof(params));

	/* physical endpoint 1 (ep0 IN) never issues DEPSTARTCFG */
	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			/* issue the command at most once per configuration */
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		/* the command is always sent through physical endpoint 0 */
		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}
443
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	/* param0: transfer type, max packet size and burst size */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
		| DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);

	/* param1: which events the controller should raise for us */
	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	/* SuperSpeed bulk endpoints may advertise stream support */
	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	/* bInterval is 1-based; the hardware field is zero-based */
	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}
491
492static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
493{
494	struct dwc3_gadget_ep_cmd_params params;
495
496	memset(&params, 0x00, sizeof(params));
497
498	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
499
500	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
501			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
502}
503
/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 * @comp_desc: USB SuperSpeed Endpoint Companion Descriptor
 *
 * Caller should take care of locking
 */
511static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
512		const struct usb_endpoint_descriptor *desc,
513		const struct usb_ss_ep_comp_descriptor *comp_desc)
514{
515	struct dwc3		*dwc = dep->dwc;
516	u32			reg;
517	int			ret = -ENOMEM;
518
519	if (!(dep->flags & DWC3_EP_ENABLED)) {
520		ret = dwc3_gadget_start_config(dwc, dep);
521		if (ret)
522			return ret;
523	}
524
525	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
526	if (ret)
527		return ret;
528
529	if (!(dep->flags & DWC3_EP_ENABLED)) {
530		struct dwc3_trb	*trb_st_hw;
531		struct dwc3_trb	*trb_link;
532
533		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
534		if (ret)
535			return ret;
536
537		dep->desc = desc;
538		dep->comp_desc = comp_desc;
539		dep->type = usb_endpoint_type(desc);
540		dep->flags |= DWC3_EP_ENABLED;
541
542		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
543		reg |= DWC3_DALEPENA_EP(dep->number);
544		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
545
546		if (!usb_endpoint_xfer_isoc(desc))
547			return 0;
548
549		memset(&trb_link, 0, sizeof(trb_link));
550
551		/* Link TRB for ISOC. The HWO bit is never reset */
552		trb_st_hw = &dep->trb_pool[0];
553
554		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
555
556		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
557		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
558		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
559		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
560	}
561
562	return 0;
563}
564
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);

/*
 * dwc3_remove_requests - flush every request owned by an endpoint
 * @dwc: pointer to our controller context structure
 * @dep: the endpoint being torn down
 *
 * Requests already handed to the hardware get their transfer stopped
 * first; everything still on the software list is completed with
 * -ESHUTDOWN. Caller should take care of locking.
 */
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued))
		dwc3_stop_active_transfer(dwc, dep->number);

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}
579
580/**
581 * __dwc3_gadget_ep_disable - Disables a HW endpoint
582 * @dep: the endpoint to disable
583 *
 * This function also removes requests which are currently processed by the
585 * hardware and those which are not yet scheduled.
586 * Caller should take care of locking.
587 */
588static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
589{
590	struct dwc3		*dwc = dep->dwc;
591	u32			reg;
592
593	dwc3_remove_requests(dwc, dep);
594
595	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
596	reg &= ~DWC3_DALEPENA_EP(dep->number);
597	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
598
599	dep->stream_capable = false;
600	dep->desc = NULL;
601	dep->comp_desc = NULL;
602	dep->type = 0;
603	dep->flags = 0;
604
605	return 0;
606}
607
608/* -------------------------------------------------------------------------- */
609
/* ep0 is managed by this driver itself; gadget drivers may not enable it */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
615
/* ep0 is managed by this driver itself; gadget drivers may not disable it */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
620
621/* -------------------------------------------------------------------------- */
622
623static int dwc3_gadget_ep_enable(struct usb_ep *ep,
624		const struct usb_endpoint_descriptor *desc)
625{
626	struct dwc3_ep			*dep;
627	struct dwc3			*dwc;
628	unsigned long			flags;
629	int				ret;
630
631	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
632		pr_debug("dwc3: invalid parameters\n");
633		return -EINVAL;
634	}
635
636	if (!desc->wMaxPacketSize) {
637		pr_debug("dwc3: missing wMaxPacketSize\n");
638		return -EINVAL;
639	}
640
641	dep = to_dwc3_ep(ep);
642	dwc = dep->dwc;
643
644	switch (usb_endpoint_type(desc)) {
645	case USB_ENDPOINT_XFER_CONTROL:
646		strlcat(dep->name, "-control", sizeof(dep->name));
647		break;
648	case USB_ENDPOINT_XFER_ISOC:
649		strlcat(dep->name, "-isoc", sizeof(dep->name));
650		break;
651	case USB_ENDPOINT_XFER_BULK:
652		strlcat(dep->name, "-bulk", sizeof(dep->name));
653		break;
654	case USB_ENDPOINT_XFER_INT:
655		strlcat(dep->name, "-int", sizeof(dep->name));
656		break;
657	default:
658		dev_err(dwc->dev, "invalid endpoint transfer type\n");
659	}
660
661	if (dep->flags & DWC3_EP_ENABLED) {
662		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
663				dep->name);
664		return 0;
665	}
666
667	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
668
669	spin_lock_irqsave(&dwc->lock, flags);
670	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
671	spin_unlock_irqrestore(&dwc->lock, flags);
672
673	return ret;
674}
675
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	/* restore the generic "ep%d{in,out}" name used before enable */
	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
707
708static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
709	gfp_t gfp_flags)
710{
711	struct dwc3_request		*req;
712	struct dwc3_ep			*dep = to_dwc3_ep(ep);
713	struct dwc3			*dwc = dep->dwc;
714
715	req = kzalloc(sizeof(*req), gfp_flags);
716	if (!req) {
717		dev_err(dwc->dev, "not enough memory\n");
718		return NULL;
719	}
720
721	req->epnum	= dep->number;
722	req->dep	= dep;
723	req->request.dma = DMA_ADDR_INVALID;
724
725	return &req->request;
726}
727
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	/* requests come from kzalloc(); just free the container struct */
	kfree(to_dwc3_request(request));
}
735
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @dma: DMA address of the buffer chunk described by this TRB
 * @length: number of bytes covered by this TRB
 * @last: true if this is the last TRB of the transfer (sets LST)
 * @chain: true if more TRBs for the same request follow (sets CHN)
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain)
{
	struct dwc3		*dwc = dep->dwc;
	struct dwc3_trb		*trb;

	unsigned int		cur_slot;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* grab the next free slot on the (power-of-two sized) TRB ring */
	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	cur_slot = dep->free_slot;
	dep->free_slot++;

	/*
	 * Skip the LINK-TRB on ISOC.
	 * NOTE(review): this returns without programming a TRB for the
	 * current chunk even though free_slot was already advanced --
	 * confirm callers account for that.
	 */
	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->desc))
		return;

	/* first TRB of a request: move it onto the queued list */
	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
			trb->ctrl |= DWC3_TRB_CTRL_IOC;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		/* the isoc ring never uses CHN/LST bits */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else {
		if (chain)
			trb->ctrl |= DWC3_TRB_CTRL_CHN;

		if (last)
			trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	/* hand ownership of the TRB to the controller last */
	trb->ctrl |= DWC3_TRB_CTRL_HWO;
}
815
816/*
817 * dwc3_prepare_trbs - setup TRBs from requests
818 * @dep: endpoint for which requests are being prepared
819 * @starting: true if the endpoint is idle and no requests are queued.
820 *
821 * The function goes through the requests list and sets up TRBs for the
822 * transfers. The function returns once there are no more TRBs available or
823 * it runs out of requests.
824 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request	*req, *n;
	u32			trbs_left;
	u32			max;
	unsigned int		last_one = 0;

	/* the slot arithmetic below relies on DWC3_TRB_MASK being valid */
	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy & slot are equal than it is either full or empty. If we are
	 * starting to process requests then we are empty. Otherwise we are
	 * full and don't do anything
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned	length;
		dma_addr_t	dma;

		if (req->request.num_mapped_sgs > 0) {
			/* scatter-gather: one TRB per mapped sg entry */
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int		i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				/* the final sg entry terminates the chain */
				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain);

				if (last_one)
					break;
			}
		} else {
			/* linear buffer: one TRB covers the whole request */
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false);

			if (last_one)
				break;
		}
	}
}
932
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	/* can't start a brand new transfer while one is still running */
	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		/* remember to kick the transfer once a request shows up */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	/* hand the first TRB's DMA address to the controller */
	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(req->trb_dma);
	params.param1 = lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		dwc3_unmap_buffer_from_dma(req);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	/* remember the transfer index for a later ENDTRANSFER command */
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	WARN_ON_ONCE(!dep->res_trans_idx);

	return 0;
}
1003
1004static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1005{
1006	req->request.actual	= 0;
1007	req->request.status	= -EINPROGRESS;
1008	req->direction		= dep->direction;
1009	req->epnum		= dep->number;
1010
1011	/*
1012	 * We only add to our list of requests now and
1013	 * start consuming the list once we get XferNotReady
1014	 * IRQ.
1015	 *
1016	 * That way, we avoid doing anything that we don't need
1017	 * to do now and defer it until the point we receive a
1018	 * particular token from the Host side.
1019	 *
1020	 * This will also avoid Host cancelling URBs due to too
1021	 * many NAKs.
1022	 */
1023	dwc3_map_buffer_to_dma(req);
1024	list_add_tail(&req->list, &dep->request_list);
1025
1026	/*
1027	 * There is one special case: XferNotReady with
1028	 * empty list of requests. We need to kick the
1029	 * transfer here in that situation, otherwise
1030	 * we will be NAKing forever.
1031	 *
1032	 * If we get XferNotReady before gadget driver
1033	 * has a chance to queue a request, we will ACK
1034	 * the IRQ but won't be able to receive the data
1035	 * until the next request is queued. The following
1036	 * code is handling exactly that.
1037	 */
1038	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1039		int ret;
1040		int start_trans;
1041
1042		start_trans = 1;
1043		if (usb_endpoint_xfer_isoc(dep->desc) &&
1044				(dep->flags & DWC3_EP_BUSY))
1045			start_trans = 0;
1046
1047		ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
1048		if (ret && ret != -EBUSY) {
1049			struct dwc3	*dwc = dep->dwc;
1050
1051			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1052					dep->name);
1053		}
1054	};
1055
1056	return 0;
1057}
1058
1059static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1060	gfp_t gfp_flags)
1061{
1062	struct dwc3_request		*req = to_dwc3_request(request);
1063	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1064	struct dwc3			*dwc = dep->dwc;
1065
1066	unsigned long			flags;
1067
1068	int				ret;
1069
1070	if (!dep->desc) {
1071		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1072				request, ep->name);
1073		return -ESHUTDOWN;
1074	}
1075
1076	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1077			request, ep->name, request->length);
1078
1079	spin_lock_irqsave(&dwc->lock, flags);
1080	ret = __dwc3_gadget_ep_queue(dep, req);
1081	spin_unlock_irqrestore(&dwc->lock, flags);
1082
1083	return ret;
1084}
1085
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	/* first look on the list of not-yet-started requests */
	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		/* not found; check requests already handed to the hardware */
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out0;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1129
1130int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1131{
1132	struct dwc3_gadget_ep_cmd_params	params;
1133	struct dwc3				*dwc = dep->dwc;
1134	int					ret;
1135
1136	memset(&params, 0x00, sizeof(params));
1137
1138	if (value) {
1139		if (dep->number == 0 || dep->number == 1) {
1140			/*
1141			 * Whenever EP0 is stalled, we will restart
1142			 * the state machine, thus moving back to
1143			 * Setup Phase
1144			 */
1145			dwc->ep0state = EP0_SETUP_PHASE;
1146		}
1147
1148		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1149			DWC3_DEPCMD_SETSTALL, &params);
1150		if (ret)
1151			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1152					value ? "set" : "clear",
1153					dep->name);
1154		else
1155			dep->flags |= DWC3_EP_STALL;
1156	} else {
1157		if (dep->flags & DWC3_EP_WEDGE)
1158			return 0;
1159
1160		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1161			DWC3_DEPCMD_CLEARSTALL, &params);
1162		if (ret)
1163			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1164					value ? "set" : "clear",
1165					dep->name);
1166		else
1167			dep->flags &= ~DWC3_EP_STALL;
1168	}
1169
1170	return ret;
1171}
1172
1173static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1174{
1175	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1176	struct dwc3			*dwc = dep->dwc;
1177
1178	unsigned long			flags;
1179
1180	int				ret;
1181
1182	spin_lock_irqsave(&dwc->lock, flags);
1183
1184	if (usb_endpoint_xfer_isoc(dep->desc)) {
1185		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1186		ret = -EINVAL;
1187		goto out;
1188	}
1189
1190	ret = __dwc3_gadget_ep_set_halt(dep, value);
1191out:
1192	spin_unlock_irqrestore(&dwc->lock, flags);
1193
1194	return ret;
1195}
1196
1197static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1198{
1199	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1200
1201	dep->flags |= DWC3_EP_WEDGE;
1202
1203	return dwc3_gadget_ep_set_halt(ep, 1);
1204}
1205
1206/* -------------------------------------------------------------------------- */
1207
/*
 * Template descriptor for EP0. wMaxPacketSize is filled in at runtime
 * once the connection speed is known (see dwc3_gadget_start() and
 * dwc3_gadget_conndone_interrupt()), which is why this is not const.
 */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};
1213
/* Endpoint operations for EP0: dedicated enable/disable/queue handlers,
 * the remaining ops are shared with the other endpoints. */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
1224
/* Endpoint operations for all non-control endpoints. */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
1235
1236/* -------------------------------------------------------------------------- */
1237
1238static int dwc3_gadget_get_frame(struct usb_gadget *g)
1239{
1240	struct dwc3		*dwc = gadget_to_dwc(g);
1241	u32			reg;
1242
1243	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1244	return DWC3_DSTS_SOFFN(reg);
1245}
1246
1247static int dwc3_gadget_wakeup(struct usb_gadget *g)
1248{
1249	struct dwc3		*dwc = gadget_to_dwc(g);
1250
1251	unsigned long		timeout;
1252	unsigned long		flags;
1253
1254	u32			reg;
1255
1256	int			ret = 0;
1257
1258	u8			link_state;
1259	u8			speed;
1260
1261	spin_lock_irqsave(&dwc->lock, flags);
1262
1263	/*
1264	 * According to the Databook Remote wakeup request should
1265	 * be issued only when the device is in early suspend state.
1266	 *
1267	 * We can check that via USB Link State bits in DSTS register.
1268	 */
1269	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1270
1271	speed = reg & DWC3_DSTS_CONNECTSPD;
1272	if (speed == DWC3_DSTS_SUPERSPEED) {
1273		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1274		ret = -EINVAL;
1275		goto out;
1276	}
1277
1278	link_state = DWC3_DSTS_USBLNKST(reg);
1279
1280	switch (link_state) {
1281	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1282	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1283		break;
1284	default:
1285		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1286				link_state);
1287		ret = -EINVAL;
1288		goto out;
1289	}
1290
1291	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1292	if (ret < 0) {
1293		dev_err(dwc->dev, "failed to put link in Recovery\n");
1294		goto out;
1295	}
1296
1297	/* write zeroes to Link Change Request */
1298	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1299	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1300
1301	/* poll until Link State changes to ON */
1302	timeout = jiffies + msecs_to_jiffies(100);
1303
1304	while (!time_after(jiffies, timeout)) {
1305		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1306
1307		/* in HS, means ON */
1308		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1309			break;
1310	}
1311
1312	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1313		dev_err(dwc->dev, "failed to send remote wakeup\n");
1314		ret = -EINVAL;
1315	}
1316
1317out:
1318	spin_unlock_irqrestore(&dwc->lock, flags);
1319
1320	return ret;
1321}
1322
1323static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1324		int is_selfpowered)
1325{
1326	struct dwc3		*dwc = gadget_to_dwc(g);
1327
1328	dwc->is_selfpowered = !!is_selfpowered;
1329
1330	return 0;
1331}
1332
1333static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1334{
1335	u32			reg;
1336	u32			timeout = 500;
1337
1338	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1339	if (is_on) {
1340		reg &= ~DWC3_DCTL_TRGTULST_MASK;
1341		reg |= (DWC3_DCTL_RUN_STOP
1342				| DWC3_DCTL_TRGTULST_RX_DET);
1343	} else {
1344		reg &= ~DWC3_DCTL_RUN_STOP;
1345	}
1346
1347	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1348
1349	do {
1350		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1351		if (is_on) {
1352			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1353				break;
1354		} else {
1355			if (reg & DWC3_DSTS_DEVCTRLHLT)
1356				break;
1357		}
1358		timeout--;
1359		if (!timeout)
1360			break;
1361		udelay(1);
1362	} while (1);
1363
1364	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1365			dwc->gadget_driver
1366			? dwc->gadget_driver->function : "no-function",
1367			is_on ? "connect" : "disconnect");
1368}
1369
1370static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1371{
1372	struct dwc3		*dwc = gadget_to_dwc(g);
1373	unsigned long		flags;
1374
1375	is_on = !!is_on;
1376
1377	spin_lock_irqsave(&dwc->lock, flags);
1378	dwc3_gadget_run_stop(dwc, is_on);
1379	spin_unlock_irqrestore(&dwc->lock, flags);
1380
1381	return 0;
1382}
1383
/*
 * dwc3_gadget_start - usb_gadget_ops.udc_start: bind a gadget driver
 * @g: our gadget
 * @driver: the gadget driver being bound
 *
 * Programs the maximum speed into DCFG, enables both directions of the
 * control endpoint and arms EP0 for SETUP packets. Returns 0 on success,
 * -EBUSY if a driver is already bound, or the EP0 enable error.
 */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	struct dwc3_ep		*dep;
	unsigned long		flags;
	int			ret = 0;
	u32			reg;

	spin_lock_irqsave(&dwc->lock, flags);

	/* only one gadget driver may be bound at a time */
	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err0;
	}

	dwc->gadget_driver	= driver;
	dwc->gadget.dev.driver	= &driver->driver;

	/* program the highest speed we should attempt */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);
	reg |= dwc->maximum_speed;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	/* enable the physical endpoints 0 (OUT) and 1 (IN) for EP0 */
	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1446
1447static int dwc3_gadget_stop(struct usb_gadget *g,
1448		struct usb_gadget_driver *driver)
1449{
1450	struct dwc3		*dwc = gadget_to_dwc(g);
1451	unsigned long		flags;
1452
1453	spin_lock_irqsave(&dwc->lock, flags);
1454
1455	__dwc3_gadget_ep_disable(dwc->eps[0]);
1456	__dwc3_gadget_ep_disable(dwc->eps[1]);
1457
1458	dwc->gadget_driver	= NULL;
1459	dwc->gadget.dev.driver	= NULL;
1460
1461	spin_unlock_irqrestore(&dwc->lock, flags);
1462
1463	return 0;
1464}
/* Gadget-level operations exposed to the UDC framework. */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};
1473
1474/* -------------------------------------------------------------------------- */
1475
/*
 * dwc3_gadget_init_endpoints - allocate and initialize all endpoints
 * @dwc: controller context
 *
 * Physical endpoint numbering: even numbers are OUT, odd are IN, so
 * epnum 0/1 form EP0. Non-control endpoints are added to the gadget's
 * ep_list and get a TRB pool. Returns 0 or -ENOMEM/TRB-pool error.
 *
 * NOTE(review): on failure, endpoints allocated so far are not freed
 * here — presumably the caller unwinds via dwc3_gadget_free_endpoints();
 * verify against dwc3_gadget_init()'s error path.
 */
static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dwc->eps[epnum] = dep;

		/* e.g. epnum 2 -> "ep1out", epnum 3 -> "ep1in" */
		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");
		dep->endpoint.name = dep->name;
		dep->direction = (epnum & 1);

		if (epnum == 0 || epnum == 1) {
			/* EP0 uses the dedicated ops and is not on ep_list */
			dep->endpoint.maxpacket = 512;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int		ret;

			dep->endpoint.maxpacket = 1024;
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}
1525
1526static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1527{
1528	struct dwc3_ep			*dep;
1529	u8				epnum;
1530
1531	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1532		dep = dwc->eps[epnum];
1533		dwc3_free_trb_pool(dep);
1534
1535		if (epnum != 0 && epnum != 1)
1536			list_del(&dep->endpoint.ep_list);
1537
1538		kfree(dep);
1539	}
1540}
1541
/* struct device release callback for the gadget device; log only. */
static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}
1546
1547/* -------------------------------------------------------------------------- */
/*
 * dwc3_cleanup_done_reqs - give back completed requests on a transfer event
 * @dwc: controller context
 * @dep: the endpoint the event belongs to
 * @event: the completion event
 * @status: status to report to the gadget driver
 *
 * Walks dep->req_queued, giving back one request per TRB until the event
 * indicates the last one (short packet, LST or IOC). Returns 1 when the
 * endpoint's BUSY state should be cleared, 0 when IOC says more is coming.
 */
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		*trb;
	unsigned int		count;
	unsigned int		s_pkt = 0;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			/* event with nothing queued should never happen */
			WARN_ON_ONCE(1);
			return 1;
		}

		trb = req->trb;

		if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
			/*
			 * We continue despite the error. There is not much we
			 * can do. If we don't clean it up we loop forever. If
			 * we skip the TRB then it gets overwritten after a
			 * while since we use them in a ring buffer. A BUG()
			 * would help. Lets hope that if this occurs, someone
			 * fixes the root cause instead of looking away :)
			 */
			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
					dep->name, req->trb);
		/* remaining byte count the HW did not transfer */
		count = trb->size & DWC3_TRB_SIZE_MASK;

		if (dep->direction) {
			/* IN: leftover bytes mean the transfer was cut short */
			if (count) {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			/* OUT: a short packet legitimately ends the transfer */
			if (count && (event->status & DEPEVT_STATUS_SHORT))
				s_pkt = 1;
		}

		/*
		 * We assume here we will always receive the entire data block
		 * which we should receive. Meaning, if we program RX to
		 * receive 4K but we receive only 2K, we assume that's all we
		 * should receive and we simply bounce the request back to the
		 * gadget driver for further processing.
		 */
		req->request.actual += req->request.length - count;
		dwc3_gadget_giveback(dep, req, status);
		if (s_pkt)
			break;
		if ((event->status & DEPEVT_STATUS_LST) &&
				(trb->ctrl & DWC3_TRB_CTRL_LST))
			break;
		if ((event->status & DEPEVT_STATUS_IOC) &&
				(trb->ctrl & DWC3_TRB_CTRL_IOC))
			break;
	} while (1);

	/* IOC on the last TRB means the transfer is still in progress */
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 0;
	return 1;
}
1613
/*
 * dwc3_endpoint_transfer_complete - handle XferComplete/XferInProgress
 * @dwc: controller context
 * @dep: endpoint the event arrived on
 * @event: the transfer event
 * @start_new: unused here; distinguishes the two event types at the caller
 */
static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned		status = 0;
	int			clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy) {
		dep->flags &= ~DWC3_EP_BUSY;
		dep->res_trans_idx = 0;
	}

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32		reg;
		int		i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			/* deliberately shadows the parameter 'dep' */
			struct dwc3_ep	*dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			/* only re-enable U1/U2 once all endpoints are idle */
			if (!list_empty(&dep->req_queued))
				return;
		}

		/* restore the U1/U2 enable bits saved by the 1st half */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}
1655
1656static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1657		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1658{
1659	u32 uf;
1660
1661	if (list_empty(&dep->request_list)) {
1662		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1663			dep->name);
1664		return;
1665	}
1666
1667	if (event->parameters) {
1668		u32 mask;
1669
1670		mask = ~(dep->interval - 1);
1671		uf = event->parameters & mask;
1672		/* 4 micro frames in the future */
1673		uf += dep->interval * 4;
1674	} else {
1675		uf = 0;
1676	}
1677
1678	__dwc3_gadget_kick_transfer(dep, uf, 1);
1679}
1680
/* Handle the completion of an ENDTRANSFER endpoint command. */
static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_event_depevt mod_ev = *event;

	/*
	 * We were asked to remove one request. It is possible that this
	 * request and a few others were started together and share the
	 * same transfer index. Since we stopped the whole endpoint we
	 * don't know how many of those requests were already completed
	 * (and reported) and how many will complete later. Mark the
	 * event as "last transfer" and purge everything remaining on
	 * the queued list.
	 */
	mod_ev.status = DEPEVT_STATUS_LST;
	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
	dep->flags &= ~DWC3_EP_BUSY;
	/* pending requests are ignored and are queued on XferNotReady */
}
1700
1701static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1702		const struct dwc3_event_depevt *event)
1703{
1704	u32 param = event->parameters;
1705	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1706
1707	switch (cmd_type) {
1708	case DWC3_DEPCMD_ENDTRANSFER:
1709		dwc3_process_ep_cmd_complete(dep, event);
1710		break;
1711	case DWC3_DEPCMD_STARTTRANSFER:
1712		dep->res_trans_idx = param & 0x7f;
1713		break;
1714	default:
1715		printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
1716				__func__, cmd_type);
1717		break;
1718	};
1719}
1720
/*
 * dwc3_endpoint_interrupt - dispatch a single endpoint event
 * @dwc: controller context
 * @event: the endpoint event to handle
 *
 * EP0 events (physical endpoints 0 and 1) are forwarded to the ep0
 * code; all others are handled here per event type.
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		/* isoc endpoints complete via XferInProgress instead */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		if (!usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			/* try to start whatever is pending on this ep */
			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		/* stream events are only meaningful on bulk endpoints */
		if (!usb_endpoint_xfer_bulk(dep->desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_ep_cmd_compl(dep, event);
		break;
	}
}
1804
1805static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1806{
1807	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1808		spin_unlock(&dwc->lock);
1809		dwc->gadget_driver->disconnect(&dwc->gadget);
1810		spin_lock(&dwc->lock);
1811	}
1812}
1813
1814static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1815{
1816	struct dwc3_ep *dep;
1817	struct dwc3_gadget_ep_cmd_params params;
1818	u32 cmd;
1819	int ret;
1820
1821	dep = dwc->eps[epnum];
1822
1823	WARN_ON(!dep->res_trans_idx);
1824	if (dep->res_trans_idx) {
1825		cmd = DWC3_DEPCMD_ENDTRANSFER;
1826		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1827		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1828		memset(&params, 0, sizeof(params));
1829		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1830		WARN_ON_ONCE(ret);
1831		dep->res_trans_idx = 0;
1832	}
1833}
1834
1835static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1836{
1837	u32 epnum;
1838
1839	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1840		struct dwc3_ep *dep;
1841
1842		dep = dwc->eps[epnum];
1843		if (!(dep->flags & DWC3_EP_ENABLED))
1844			continue;
1845
1846		dwc3_remove_requests(dwc, dep);
1847	}
1848}
1849
1850static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1851{
1852	u32 epnum;
1853
1854	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1855		struct dwc3_ep *dep;
1856		struct dwc3_gadget_ep_cmd_params params;
1857		int ret;
1858
1859		dep = dwc->eps[epnum];
1860
1861		if (!(dep->flags & DWC3_EP_STALL))
1862			continue;
1863
1864		dep->flags &= ~DWC3_EP_STALL;
1865
1866		memset(&params, 0, sizeof(params));
1867		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1868				DWC3_DEPCMD_CLEARSTALL, &params);
1869		WARN_ON_ONCE(ret);
1870	}
1871}
1872
/* Handle the Disconnect device event: flush all active transfers,
 * notify the gadget driver and reset the connection state. */
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);
#if 0
	XXX
	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
	enable it before we can disable it.

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
#endif

	dwc3_stop_active_transfers(dwc);
	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
}
1896
1897static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1898{
1899	u32			reg;
1900
1901	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1902
1903	if (on)
1904		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1905	else
1906		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1907
1908	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1909}
1910
1911static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1912{
1913	u32			reg;
1914
1915	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1916
1917	if (on)
1918		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1919	else
1920		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1921
1922	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1923}
1924
/* Handle the USB Reset device event: clean up all endpoint state and
 * return the core to its Default (unaddressed) state. */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	/* after reset -> Default State */
	dwc->dev_state = DWC3_DEFAULT_STATE;

	/* Enable PHYs */
	dwc3_gadget_usb2_phy_power(dwc, true);
	dwc3_gadget_usb3_phy_power(dwc, true);

	/* a reset while configured is effectively a disconnect */
	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	/* leave any USB test mode that may have been entered */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
1986
1987static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1988{
1989	u32 reg;
1990	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1991
1992	/*
1993	 * We change the clock only at SS but I dunno why I would want to do
1994	 * this. Maybe it becomes part of the power saving plan.
1995	 */
1996
1997	if (speed != DWC3_DSTS_SUPERSPEED)
1998		return;
1999
2000	/*
2001	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2002	 * each time on Connect Done.
2003	 */
2004	if (!usb30_clock)
2005		return;
2006
2007	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2008	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2009	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2010}
2011
2012static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
2013{
2014	switch (speed) {
2015	case USB_SPEED_SUPER:
2016		dwc3_gadget_usb2_phy_power(dwc, false);
2017		break;
2018	case USB_SPEED_HIGH:
2019	case USB_SPEED_FULL:
2020	case USB_SPEED_LOW:
2021		dwc3_gadget_usb3_phy_power(dwc, false);
2022		break;
2023	}
2024}
2025
2026static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2027{
2028	struct dwc3_gadget_ep_cmd_params params;
2029	struct dwc3_ep		*dep;
2030	int			ret;
2031	u32			reg;
2032	u8			speed;
2033
2034	dev_vdbg(dwc->dev, "%s\n", __func__);
2035
2036	memset(&params, 0x00, sizeof(params));
2037
2038	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2039	speed = reg & DWC3_DSTS_CONNECTSPD;
2040	dwc->speed = speed;
2041
2042	dwc3_update_ram_clk_sel(dwc, speed);
2043
2044	switch (speed) {
2045	case DWC3_DCFG_SUPERSPEED:
2046		/*
2047		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2048		 * would cause a missing USB3 Reset event.
2049		 *
2050		 * In such situations, we should force a USB3 Reset
2051		 * event by calling our dwc3_gadget_reset_interrupt()
2052		 * routine.
2053		 *
2054		 * Refers to:
2055		 *
2056		 * STAR#9000483510: RTL: SS : USB3 reset event may
2057		 * not be generated always when the link enters poll
2058		 */
2059		if (dwc->revision < DWC3_REVISION_190A)
2060			dwc3_gadget_reset_interrupt(dwc);
2061
2062		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2063		dwc->gadget.ep0->maxpacket = 512;
2064		dwc->gadget.speed = USB_SPEED_SUPER;
2065		break;
2066	case DWC3_DCFG_HIGHSPEED:
2067		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2068		dwc->gadget.ep0->maxpacket = 64;
2069		dwc->gadget.speed = USB_SPEED_HIGH;
2070		break;
2071	case DWC3_DCFG_FULLSPEED2:
2072	case DWC3_DCFG_FULLSPEED1:
2073		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2074		dwc->gadget.ep0->maxpacket = 64;
2075		dwc->gadget.speed = USB_SPEED_FULL;
2076		break;
2077	case DWC3_DCFG_LOWSPEED:
2078		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2079		dwc->gadget.ep0->maxpacket = 8;
2080		dwc->gadget.speed = USB_SPEED_LOW;
2081		break;
2082	}
2083
2084	/* Disable unneded PHY */
2085	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
2086
2087	dep = dwc->eps[0];
2088	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2089	if (ret) {
2090		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2091		return;
2092	}
2093
2094	dep = dwc->eps[1];
2095	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2096	if (ret) {
2097		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2098		return;
2099	}
2100
2101	/*
2102	 * Configure PHY via GUSB3PIPECTLn if required.
2103	 *
2104	 * Update GTXFIFOSIZn
2105	 *
2106	 * In both cases reset values should be sufficient.
2107	 */
2108}
2109
2110static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2111{
2112	dev_vdbg(dwc->dev, "%s\n", __func__);
2113
2114	/*
2115	 * TODO take core out of low power mode when that's
2116	 * implemented.
2117	 */
2118
2119	dwc->gadget_driver->resume(&dwc->gadget);
2120}
2121
/*
 * dwc3_gadget_linksts_change_interrupt - handle a Link Status Change event
 * @dwc: controller context
 * @evtinfo: event info field carrying the new link state
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* save the bits so the 2nd half (in
				 * dwc3_endpoint_transfer_complete) can
				 * restore them */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	dwc->link_state = next;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
2177
/* Dispatch a device-specific (non-endpoint) event to its handler. */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	/* the remaining events are informational and only logged */
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
2216
2217static void dwc3_process_event_entry(struct dwc3 *dwc,
2218		const union dwc3_event *event)
2219{
2220	/* Endpoint IRQ, handle it and return early */
2221	if (event->type.is_devspec == 0) {
2222		/* depevt */
2223		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2224	}
2225
2226	switch (event->type.type) {
2227	case DWC3_EVENT_TYPE_DEV:
2228		dwc3_gadget_interrupt(dwc, &event->devt);
2229		break;
2230	/* REVISIT what to do with Carkit and I2C events ? */
2231	default:
2232		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2233	}
2234}
2235
/*
 * dwc3_process_event_buf - drain one hardware event buffer
 * @dwc: controller context
 * @buf: index of the event buffer to process
 *
 * Reads the pending-byte count from GEVNTCOUNT, processes one 4-byte
 * event at a time and acknowledges each by writing 4 back to
 * GEVNTCOUNT. Returns IRQ_HANDLED if any events were processed,
 * IRQ_NONE otherwise.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	int left;
	u32 count;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt = dwc->ev_buffs[buf];
	left = count;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);
		/*
		 * XXX we wrap around correctly to the next entry as almost all
		 * entries are 4 bytes in size. There is one entry which has 12
		 * bytes which is a regular entry followed by 8 bytes data. ATM
		 * I don't know how things are organized if were get next to the
		 * a boundary so I worry about that once we try to handle that.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		/* acknowledge this entry so the HW can reuse the slot */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	return IRQ_HANDLED;
}
2271
2272static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2273{
2274	struct dwc3			*dwc = _dwc;
2275	int				i;
2276	irqreturn_t			ret = IRQ_NONE;
2277
2278	spin_lock(&dwc->lock);
2279
2280	for (i = 0; i < dwc->num_event_buffers; i++) {
2281		irqreturn_t status;
2282
2283		status = dwc3_process_event_buf(dwc, i);
2284		if (status == IRQ_HANDLED)
2285			ret = status;
2286	}
2287
2288	spin_unlock(&dwc->lock);
2289
2290	return ret;
2291}
2292
2293/**
2294 * dwc3_gadget_init - Initializes gadget related registers
2295 * @dwc: pointer to our controller context structure
2296 *
2297 * Returns 0 on success otherwise negative errno.
2298 */
2299int __devinit dwc3_gadget_init(struct dwc3 *dwc)
2300{
2301	u32					reg;
2302	int					ret;
2303	int					irq;
2304
2305	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2306			&dwc->ctrl_req_addr, GFP_KERNEL);
2307	if (!dwc->ctrl_req) {
2308		dev_err(dwc->dev, "failed to allocate ctrl request\n");
2309		ret = -ENOMEM;
2310		goto err0;
2311	}
2312
2313	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2314			&dwc->ep0_trb_addr, GFP_KERNEL);
2315	if (!dwc->ep0_trb) {
2316		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2317		ret = -ENOMEM;
2318		goto err1;
2319	}
2320
2321	dwc->setup_buf = dma_alloc_coherent(dwc->dev,
2322			sizeof(*dwc->setup_buf) * 2,
2323			&dwc->setup_buf_addr, GFP_KERNEL);
2324	if (!dwc->setup_buf) {
2325		dev_err(dwc->dev, "failed to allocate setup buffer\n");
2326		ret = -ENOMEM;
2327		goto err2;
2328	}
2329
2330	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2331			512, &dwc->ep0_bounce_addr, GFP_KERNEL);
2332	if (!dwc->ep0_bounce) {
2333		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2334		ret = -ENOMEM;
2335		goto err3;
2336	}
2337
2338	dev_set_name(&dwc->gadget.dev, "gadget");
2339
2340	dwc->gadget.ops			= &dwc3_gadget_ops;
2341	dwc->gadget.max_speed		= USB_SPEED_SUPER;
2342	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2343	dwc->gadget.dev.parent		= dwc->dev;
2344	dwc->gadget.sg_supported	= true;
2345
2346	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2347
2348	dwc->gadget.dev.dma_parms	= dwc->dev->dma_parms;
2349	dwc->gadget.dev.dma_mask	= dwc->dev->dma_mask;
2350	dwc->gadget.dev.release		= dwc3_gadget_release;
2351	dwc->gadget.name		= "dwc3-gadget";
2352
2353	/*
2354	 * REVISIT: Here we should clear all pending IRQs to be
2355	 * sure we're starting from a well known location.
2356	 */
2357
2358	ret = dwc3_gadget_init_endpoints(dwc);
2359	if (ret)
2360		goto err4;
2361
2362	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2363
2364	ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2365			"dwc3", dwc);
2366	if (ret) {
2367		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2368				irq, ret);
2369		goto err5;
2370	}
2371
2372	/* Enable all but Start and End of Frame IRQs */
2373	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2374			DWC3_DEVTEN_EVNTOVERFLOWEN |
2375			DWC3_DEVTEN_CMDCMPLTEN |
2376			DWC3_DEVTEN_ERRTICERREN |
2377			DWC3_DEVTEN_WKUPEVTEN |
2378			DWC3_DEVTEN_ULSTCNGEN |
2379			DWC3_DEVTEN_CONNECTDONEEN |
2380			DWC3_DEVTEN_USBRSTEN |
2381			DWC3_DEVTEN_DISCONNEVTEN);
2382	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2383
2384	ret = device_register(&dwc->gadget.dev);
2385	if (ret) {
2386		dev_err(dwc->dev, "failed to register gadget device\n");
2387		put_device(&dwc->gadget.dev);
2388		goto err6;
2389	}
2390
2391	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2392	if (ret) {
2393		dev_err(dwc->dev, "failed to register udc\n");
2394		goto err7;
2395	}
2396
2397	return 0;
2398
2399err7:
2400	device_unregister(&dwc->gadget.dev);
2401
2402err6:
2403	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2404	free_irq(irq, dwc);
2405
2406err5:
2407	dwc3_gadget_free_endpoints(dwc);
2408
2409err4:
2410	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2411			dwc->ep0_bounce_addr);
2412
2413err3:
2414	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2415			dwc->setup_buf, dwc->setup_buf_addr);
2416
2417err2:
2418	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2419			dwc->ep0_trb, dwc->ep0_trb_addr);
2420
2421err1:
2422	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2423			dwc->ctrl_req, dwc->ctrl_req_addr);
2424
2425err0:
2426	return ret;
2427}
2428
2429void dwc3_gadget_exit(struct dwc3 *dwc)
2430{
2431	int			irq;
2432
2433	usb_del_gadget_udc(&dwc->gadget);
2434	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2435
2436	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2437	free_irq(irq, dwc);
2438
2439	dwc3_gadget_free_endpoints(dwc);
2440
2441	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2442			dwc->ep0_bounce_addr);
2443
2444	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2445			dwc->setup_buf, dwc->setup_buf_addr);
2446
2447	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2448			dwc->ep0_trb, dwc->ep0_trb_addr);
2449
2450	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2451			dwc->ctrl_req, dwc->ctrl_req_addr);
2452
2453	device_unregister(&dwc->gadget.dev);
2454}
2455