gadget.c revision 9bafa56c7cee5c6fa68de5924220abb220c7e229
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 *    to endorse or promote products derived from this software without
20 *    specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <linux/kernel.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/interrupt.h>
46#include <linux/io.h>
47#include <linux/list.h>
48#include <linux/dma-mapping.h>
49
50#include <linux/usb/ch9.h>
51#include <linux/usb/gadget.h>
52
53#include "core.h"
54#include "gadget.h"
55#include "io.h"
56
57#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
58
59/**
60 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
61 * @dwc: pointer to our context structure
62 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
63 *
64 * Caller should take care of locking. This function will
65 * return 0 on success or -EINVAL if wrong Test Selector
66 * is passed
67 */
68int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
69{
70	u32		reg;
71
72	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
73	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
74
75	switch (mode) {
76	case TEST_J:
77	case TEST_K:
78	case TEST_SE0_NAK:
79	case TEST_PACKET:
80	case TEST_FORCE_EN:
81		reg |= mode << 1;
82		break;
83	default:
84		return -EINVAL;
85	}
86
87	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
88
89	return 0;
90}
91
92/**
93 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
94 * @dwc: pointer to our context structure
95 * @state: the state to put link into
96 *
97 * Caller should take care of locking. This function will
98 * return 0 on success or -EINVAL.
99 */
100int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
101{
102	int		retries = 100;
103	u32		reg;
104
105	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
106	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
107
108	/* set requested state */
109	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
110	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
111
112	/* wait for a change in DSTS */
113	while (--retries) {
114		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
115
116		/* in HS, means ON */
117		if (DWC3_DSTS_USBLNKST(reg) == state)
118			return 0;
119
120		udelay(500);
121	}
122
123	dev_vdbg(dwc->dev, "link state change request timed out\n");
124
125	return -ETIMEDOUT;
126}
127
128/**
129 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
130 * @dwc: pointer to our context structure
131 *
132 * This function will a best effort FIFO allocation in order
133 * to improve FIFO usage and throughput, while still allowing
134 * us to enable as many endpoints as possible.
135 *
136 * Keep in mind that this operation will be highly dependent
137 * on the configured size for RAM1 - which contains TxFifo -,
138 * the amount of endpoints enabled on coreConsultant tool, and
139 * the width of the Master Bus.
140 *
141 * In the ideal world, we would always be able to satisfy the
142 * following equation:
143 *
144 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
145 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
146 *
147 * Unfortunately, due to many variables that's not always the case.
148 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;	/* running FIFO base address, in MDWIDTH words */
	int		ram1_depth;
	int		fifo_size;
	int		mdwidth;
	int		num;

	/* nothing to do unless the platform requested a FIFO resize */
	if (!dwc->needs_fifo_resize)
		return 0;

	/*
	 * NOTE(review): ram1_depth is read but never used below —
	 * presumably intended for a bounds check against the total
	 * RAM1 size; confirm before relying on it.
	 */
	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep	*dep = dwc->eps[num];
		int		fifo_number = dep->number >> 1;
		int		mult = 1;
		int		tmp;

		/* only IN endpoints (odd physical numbers) own a TxFIFO */
		if (!(dep->number & 1))
			continue;

		/* skip endpoints that are not in use */
		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->desc)
				|| usb_endpoint_xfer_isoc(dep->desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		/* size in MDWIDTH-sized words, rounded up */
		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		/* GTXFIFOSIZ: upper half = start address, lower half = depth */
		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}
217
218void dwc3_map_buffer_to_dma(struct dwc3_request *req)
219{
220	struct dwc3			*dwc = req->dep->dwc;
221
222	if (req->request.length == 0) {
223		/* req->request.dma = dwc->setup_buf_addr; */
224		return;
225	}
226
227	if (req->request.num_sgs) {
228		int	mapped;
229
230		mapped = dma_map_sg(dwc->dev, req->request.sg,
231				req->request.num_sgs,
232				req->direction ? DMA_TO_DEVICE
233				: DMA_FROM_DEVICE);
234		if (mapped < 0) {
235			dev_err(dwc->dev, "failed to map SGs\n");
236			return;
237		}
238
239		req->request.num_mapped_sgs = mapped;
240		return;
241	}
242
243	if (req->request.dma == DMA_ADDR_INVALID) {
244		req->request.dma = dma_map_single(dwc->dev, req->request.buf,
245				req->request.length, req->direction
246				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
247		req->mapped = true;
248	}
249}
250
251void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
252{
253	struct dwc3			*dwc = req->dep->dwc;
254
255	if (req->request.length == 0) {
256		req->request.dma = DMA_ADDR_INVALID;
257		return;
258	}
259
260	if (req->request.num_mapped_sgs) {
261		req->request.dma = DMA_ADDR_INVALID;
262		dma_unmap_sg(dwc->dev, req->request.sg,
263				req->request.num_mapped_sgs,
264				req->direction ? DMA_TO_DEVICE
265				: DMA_FROM_DEVICE);
266
267		req->request.num_mapped_sgs = 0;
268		return;
269	}
270
271	if (req->mapped) {
272		dma_unmap_single(dwc->dev, req->request.dma,
273				req->request.length, req->direction
274				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
275		req->mapped = 0;
276		req->request.dma = DMA_ADDR_INVALID;
277	}
278}
279
/*
 * dwc3_gadget_giveback - return a request to the gadget driver
 * @dep: the endpoint the request belonged to
 * @req: the request being given back
 * @status: completion status to report (unless one is already set)
 *
 * Caller must hold dwc->lock; it is dropped around the gadget
 * driver's ->complete() callback and re-taken afterwards.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;

	if (req->queued) {
		/* advance busy_slot past every TRB this request consumed */
		if (req->request.num_mapped_sgs)
			dep->busy_slot += req->request.num_mapped_sgs;
		else
			dep->busy_slot++;

		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			dep->busy_slot++;
	}
	list_del(&req->list);
	req->trb = NULL;

	/* don't overwrite a status set earlier (e.g. a dequeue) */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	dwc3_unmap_buffer_from_dma(req);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	/* drop the lock: ->complete() may re-enter the driver (e.g. queue) */
	spin_unlock(&dwc->lock);
	req->request.complete(&req->dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
316
317static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
318{
319	switch (cmd) {
320	case DWC3_DEPCMD_DEPSTARTCFG:
321		return "Start New Configuration";
322	case DWC3_DEPCMD_ENDTRANSFER:
323		return "End Transfer";
324	case DWC3_DEPCMD_UPDATETRANSFER:
325		return "Update Transfer";
326	case DWC3_DEPCMD_STARTTRANSFER:
327		return "Start Transfer";
328	case DWC3_DEPCMD_CLEARSTALL:
329		return "Clear Stall";
330	case DWC3_DEPCMD_SETSTALL:
331		return "Set Stall";
332	case DWC3_DEPCMD_GETSEQNUMBER:
333		return "Get Data Sequence Number";
334	case DWC3_DEPCMD_SETTRANSFRESOURCE:
335		return "Set Endpoint Transfer Resource";
336	case DWC3_DEPCMD_SETEPCONFIG:
337		return "Set Endpoint Configuration";
338	default:
339		return "UNKNOWN command";
340	}
341}
342
343int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
344		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
345{
346	struct dwc3_ep		*dep = dwc->eps[ep];
347	u32			timeout = 500;
348	u32			reg;
349
350	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
351			dep->name,
352			dwc3_gadget_ep_cmd_string(cmd), params->param0,
353			params->param1, params->param2);
354
355	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
356	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
357	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
358
359	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
360	do {
361		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
362		if (!(reg & DWC3_DEPCMD_CMDACT)) {
363			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
364					DWC3_DEPCMD_STATUS(reg));
365			return 0;
366		}
367
368		/*
369		 * We can't sleep here, because it is also called from
370		 * interrupt context.
371		 */
372		timeout--;
373		if (!timeout)
374			return -ETIMEDOUT;
375
376		udelay(1);
377	} while (1);
378}
379
380static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
381		struct dwc3_trb *trb)
382{
383	u32		offset = (char *) trb - (char *) dep->trb_pool;
384
385	return dep->trb_pool_dma + offset;
386}
387
388static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
389{
390	struct dwc3		*dwc = dep->dwc;
391
392	if (dep->trb_pool)
393		return 0;
394
395	if (dep->number == 0 || dep->number == 1)
396		return 0;
397
398	dep->trb_pool = dma_alloc_coherent(dwc->dev,
399			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
400			&dep->trb_pool_dma, GFP_KERNEL);
401	if (!dep->trb_pool) {
402		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
403				dep->name);
404		return -ENOMEM;
405	}
406
407	return 0;
408}
409
410static void dwc3_free_trb_pool(struct dwc3_ep *dep)
411{
412	struct dwc3		*dwc = dep->dwc;
413
414	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
415			dep->trb_pool, dep->trb_pool_dma);
416
417	dep->trb_pool = NULL;
418	dep->trb_pool_dma = 0;
419}
420
421static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
422{
423	struct dwc3_gadget_ep_cmd_params params;
424	u32			cmd;
425
426	memset(&params, 0x00, sizeof(params));
427
428	if (dep->number != 1) {
429		cmd = DWC3_DEPCMD_DEPSTARTCFG;
430		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
431		if (dep->number > 1) {
432			if (dwc->start_config_issued)
433				return 0;
434			dwc->start_config_issued = true;
435			cmd |= DWC3_DEPCMD_PARAM(2);
436		}
437
438		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
439	}
440
441	return 0;
442}
443
/*
 * dwc3_gadget_set_ep_config - issue DEPCFG for one endpoint
 * @dwc: pointer to our controller context
 * @dep: endpoint being configured
 * @desc: USB endpoint descriptor
 * @comp_desc: SuperSpeed companion descriptor (may describe streams)
 *
 * Builds the two DEPCFG parameter words from the descriptors and
 * sends the Set Endpoint Configuration command.
 */
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	/* param0: transfer type, max packet size and burst size */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
		| DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);

	/* param1: which transfer events the endpoint should generate */
	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	/* bulk endpoints with a stream count advertise stream support */
	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	/* bInterval is 1-based; hardware wants it minus one */
	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}
491
492static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
493{
494	struct dwc3_gadget_ep_cmd_params params;
495
496	memset(&params, 0x00, sizeof(params));
497
498	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
499
500	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
501			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
502}
503
504/**
505 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
506 * @dep: endpoint to be initialized
507 * @desc: USB Endpoint Descriptor
508 *
509 * Caller should take care of locking
510 */
511static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
512		const struct usb_endpoint_descriptor *desc,
513		const struct usb_ss_ep_comp_descriptor *comp_desc)
514{
515	struct dwc3		*dwc = dep->dwc;
516	u32			reg;
517	int			ret = -ENOMEM;
518
519	if (!(dep->flags & DWC3_EP_ENABLED)) {
520		ret = dwc3_gadget_start_config(dwc, dep);
521		if (ret)
522			return ret;
523	}
524
525	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
526	if (ret)
527		return ret;
528
529	if (!(dep->flags & DWC3_EP_ENABLED)) {
530		struct dwc3_trb	*trb_st_hw;
531		struct dwc3_trb	*trb_link;
532
533		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
534		if (ret)
535			return ret;
536
537		dep->desc = desc;
538		dep->comp_desc = comp_desc;
539		dep->type = usb_endpoint_type(desc);
540		dep->flags |= DWC3_EP_ENABLED;
541
542		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
543		reg |= DWC3_DALEPENA_EP(dep->number);
544		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
545
546		if (!usb_endpoint_xfer_isoc(desc))
547			return 0;
548
549		memset(&trb_link, 0, sizeof(trb_link));
550
551		/* Link TRB for ISOC. The HWO bit is never reset */
552		trb_st_hw = &dep->trb_pool[0];
553
554		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
555
556		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
557		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
558		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
559		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
560	}
561
562	return 0;
563}
564
565static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
/* Cancel any in-flight transfer and give back every pending request. */
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	/* stop whatever the hardware is currently working on */
	if (!list_empty(&dep->req_queued))
		dwc3_stop_active_transfer(dwc, dep->number);

	/* return all not-yet-started requests to the gadget driver */
	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}
579
580/**
581 * __dwc3_gadget_ep_disable - Disables a HW endpoint
582 * @dep: the endpoint to disable
583 *
584 * This function also removes requests which are currently processed ny the
585 * hardware and those which are not yet scheduled.
586 * Caller should take care of locking.
587 */
588static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
589{
590	struct dwc3		*dwc = dep->dwc;
591	u32			reg;
592
593	dwc3_remove_requests(dwc, dep);
594
595	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
596	reg &= ~DWC3_DALEPENA_EP(dep->number);
597	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
598
599	dep->stream_capable = false;
600	dep->desc = NULL;
601	dep->comp_desc = NULL;
602	dep->type = 0;
603	dep->flags = 0;
604
605	return 0;
606}
607
608/* -------------------------------------------------------------------------- */
609
/* ep0 cannot be (re)enabled through the gadget API; always reject. */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
615
/* ep0 cannot be disabled through the gadget API; always reject. */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
620
621/* -------------------------------------------------------------------------- */
622
/* usb_ep_ops.enable: validate the descriptor, then enable the endpoint. */
static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/*
	 * Append the transfer type to the endpoint name for diagnostics;
	 * the plain "ep<num><dir>" name is restored at disable time.
	 */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		/*
		 * NOTE(review): this branch falls through to enabling the
		 * endpoint with no suffix and no error return. Since
		 * usb_endpoint_type() masks to 2 bits, all four values are
		 * covered above and this case looks unreachable — confirm.
		 */
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
675
676static int dwc3_gadget_ep_disable(struct usb_ep *ep)
677{
678	struct dwc3_ep			*dep;
679	struct dwc3			*dwc;
680	unsigned long			flags;
681	int				ret;
682
683	if (!ep) {
684		pr_debug("dwc3: invalid parameters\n");
685		return -EINVAL;
686	}
687
688	dep = to_dwc3_ep(ep);
689	dwc = dep->dwc;
690
691	if (!(dep->flags & DWC3_EP_ENABLED)) {
692		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
693				dep->name);
694		return 0;
695	}
696
697	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
698			dep->number >> 1,
699			(dep->number & 1) ? "in" : "out");
700
701	spin_lock_irqsave(&dwc->lock, flags);
702	ret = __dwc3_gadget_ep_disable(dep);
703	spin_unlock_irqrestore(&dwc->lock, flags);
704
705	return ret;
706}
707
708static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
709	gfp_t gfp_flags)
710{
711	struct dwc3_request		*req;
712	struct dwc3_ep			*dep = to_dwc3_ep(ep);
713	struct dwc3			*dwc = dep->dwc;
714
715	req = kzalloc(sizeof(*req), gfp_flags);
716	if (!req) {
717		dev_err(dwc->dev, "not enough memory\n");
718		return NULL;
719	}
720
721	req->epnum	= dep->number;
722	req->dep	= dep;
723	req->request.dma = DMA_ADDR_INVALID;
724
725	return &req->request;
726}
727
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	/* requests are plain kzalloc()ed objects; just free the wrapper */
	kfree(to_dwc3_request(request));
}
735
736/**
737 * dwc3_prepare_one_trb - setup one TRB from one request
738 * @dep: endpoint for which this request is prepared
739 * @req: dwc3_request pointer
740 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain)
{
	struct dwc3		*dwc = dep->dwc;
	struct dwc3_trb		*trb;

	unsigned int		cur_slot;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* claim the next free slot in the TRB ring */
	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	cur_slot = dep->free_slot;
	dep->free_slot++;

	/* Skip the LINK-TRB on ISOC */
	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->desc))
		return;

	/* first TRB of a request: remember it and move req to req_queued */
	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
			trb->ctrl |= DWC3_TRB_CTRL_IOC;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		/* isoc rings never carry chain/last; use ISP/IMI + CSP */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else {
		if (chain)
			trb->ctrl |= DWC3_TRB_CTRL_CHN;

		if (last)
			trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	/* tag bulk-stream TRBs with the request's stream ID */
	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	/* hand the TRB to the hardware last, after it is fully built */
	trb->ctrl |= DWC3_TRB_CTRL_HWO;
}
815
816/*
817 * dwc3_prepare_trbs - setup TRBs from requests
818 * @dep: endpoint for which requests are being prepared
819 * @starting: true if the endpoint is idle and no requests are queued.
820 *
821 * The function goes through the requests list and sets up TRBs for the
822 * transfers. The function returns once there are no more TRBs available or
823 * it runs out of requests.
824 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request	*req, *n;
	u32			trbs_left;
	u32			max;
	unsigned int		last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy & slot are equal than it is either full or empty. If we are
	 * starting to process requests then we are empty. Otherwise we are
	 * full and don't do anything
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
		return;

	/* fill TRBs from the pending list until we run out of either */
	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned	length;
		dma_addr_t	dma;

		if (req->request.num_mapped_sgs > 0) {
			/* scatter-gather request: one TRB per SG entry */
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int		i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				/* final SG entry closes the chain */
				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain);

				if (last_one)
					break;
			}
		} else {
			/* linear request: exactly one TRB */
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false);

			if (last_one)
				break;
		}
	}
}
932
/*
 * __dwc3_gadget_kick_transfer - arm the endpoint with prepared TRBs
 * @dep: endpoint to kick
 * @cmd_param: DEPCMD parameter (e.g. uF number for isoc start)
 * @start_new: true to issue STARTTRANSFER, false for UPDATETRANSFER
 *
 * Caller must hold dwc->lock. Returns 0 on success, -EBUSY if a new
 * start was requested while the endpoint is already busy, or the
 * error from the endpoint command.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		/* nothing queued yet; remember to kick on the next queue() */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	/* TRB descriptor address of the first request to process */
	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(req->trb_dma);
	params.param1 = lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		dwc3_unmap_buffer_from_dma(req);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	/* remember the transfer resource index for later End Transfer */
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	WARN_ON_ONCE(!dep->res_trans_idx);

	return 0;
}
1003
1004static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1005{
1006	req->request.actual	= 0;
1007	req->request.status	= -EINPROGRESS;
1008	req->direction		= dep->direction;
1009	req->epnum		= dep->number;
1010
1011	/*
1012	 * We only add to our list of requests now and
1013	 * start consuming the list once we get XferNotReady
1014	 * IRQ.
1015	 *
1016	 * That way, we avoid doing anything that we don't need
1017	 * to do now and defer it until the point we receive a
1018	 * particular token from the Host side.
1019	 *
1020	 * This will also avoid Host cancelling URBs due to too
1021	 * many NAKs.
1022	 */
1023	dwc3_map_buffer_to_dma(req);
1024	list_add_tail(&req->list, &dep->request_list);
1025
1026	/*
1027	 * There is one special case: XferNotReady with
1028	 * empty list of requests. We need to kick the
1029	 * transfer here in that situation, otherwise
1030	 * we will be NAKing forever.
1031	 *
1032	 * If we get XferNotReady before gadget driver
1033	 * has a chance to queue a request, we will ACK
1034	 * the IRQ but won't be able to receive the data
1035	 * until the next request is queued. The following
1036	 * code is handling exactly that.
1037	 */
1038	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1039		int ret;
1040		int start_trans;
1041
1042		start_trans = 1;
1043		if (usb_endpoint_xfer_isoc(dep->desc) &&
1044				(dep->flags & DWC3_EP_BUSY))
1045			start_trans = 0;
1046
1047		ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
1048		if (ret && ret != -EBUSY) {
1049			struct dwc3	*dwc = dep->dwc;
1050
1051			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1052					dep->name);
1053		}
1054	};
1055
1056	return 0;
1057}
1058
1059static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1060	gfp_t gfp_flags)
1061{
1062	struct dwc3_request		*req = to_dwc3_request(request);
1063	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1064	struct dwc3			*dwc = dep->dwc;
1065
1066	unsigned long			flags;
1067
1068	int				ret;
1069
1070	if (!dep->desc) {
1071		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1072				request, ep->name);
1073		return -ESHUTDOWN;
1074	}
1075
1076	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1077			request, ep->name, request->length);
1078
1079	spin_lock_irqsave(&dwc->lock, flags);
1080	ret = __dwc3_gadget_ep_queue(dep, req);
1081	spin_unlock_irqrestore(&dwc->lock, flags);
1082
1083	return ret;
1084}
1085
/* usb_ep_ops.dequeue: cancel a request, pending or already started. */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	/* first, look for it among the not-yet-started requests */
	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		/* not pending; check the requests already given to hardware */
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out0;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1129
1130int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1131{
1132	struct dwc3_gadget_ep_cmd_params	params;
1133	struct dwc3				*dwc = dep->dwc;
1134	int					ret;
1135
1136	memset(&params, 0x00, sizeof(params));
1137
1138	if (value) {
1139		if (dep->number == 0 || dep->number == 1) {
1140			/*
1141			 * Whenever EP0 is stalled, we will restart
1142			 * the state machine, thus moving back to
1143			 * Setup Phase
1144			 */
1145			dwc->ep0state = EP0_SETUP_PHASE;
1146		}
1147
1148		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1149			DWC3_DEPCMD_SETSTALL, &params);
1150		if (ret)
1151			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1152					value ? "set" : "clear",
1153					dep->name);
1154		else
1155			dep->flags |= DWC3_EP_STALL;
1156	} else {
1157		if (dep->flags & DWC3_EP_WEDGE)
1158			return 0;
1159
1160		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1161			DWC3_DEPCMD_CLEARSTALL, &params);
1162		if (ret)
1163			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1164					value ? "set" : "clear",
1165					dep->name);
1166		else
1167			dep->flags &= ~DWC3_EP_STALL;
1168	}
1169
1170	return ret;
1171}
1172
1173static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1174{
1175	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1176	struct dwc3			*dwc = dep->dwc;
1177
1178	unsigned long			flags;
1179
1180	int				ret;
1181
1182	spin_lock_irqsave(&dwc->lock, flags);
1183
1184	if (usb_endpoint_xfer_isoc(dep->desc)) {
1185		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1186		ret = -EINVAL;
1187		goto out;
1188	}
1189
1190	ret = __dwc3_gadget_ep_set_halt(dep, value);
1191out:
1192	spin_unlock_irqrestore(&dwc->lock, flags);
1193
1194	return ret;
1195}
1196
1197static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1198{
1199	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1200
1201	dep->flags |= DWC3_EP_WEDGE;
1202
1203	return dwc3_gadget_ep_set_halt(ep, 1);
1204}
1205
1206/* -------------------------------------------------------------------------- */
1207
/*
 * Template descriptor for ep0. wMaxPacketSize is filled in at runtime
 * (on udc_start and again on Connect Done) according to the negotiated
 * speed, so it is deliberately left out here.
 */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};
1213
/* Endpoint operations for ep0: enable/disable/queue use ep0-specific paths. */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
1224
/* Endpoint operations for all non-control endpoints. */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
1235
1236/* -------------------------------------------------------------------------- */
1237
1238static int dwc3_gadget_get_frame(struct usb_gadget *g)
1239{
1240	struct dwc3		*dwc = gadget_to_dwc(g);
1241	u32			reg;
1242
1243	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1244	return DWC3_DSTS_SOFFN(reg);
1245}
1246
1247static int dwc3_gadget_wakeup(struct usb_gadget *g)
1248{
1249	struct dwc3		*dwc = gadget_to_dwc(g);
1250
1251	unsigned long		timeout;
1252	unsigned long		flags;
1253
1254	u32			reg;
1255
1256	int			ret = 0;
1257
1258	u8			link_state;
1259	u8			speed;
1260
1261	spin_lock_irqsave(&dwc->lock, flags);
1262
1263	/*
1264	 * According to the Databook Remote wakeup request should
1265	 * be issued only when the device is in early suspend state.
1266	 *
1267	 * We can check that via USB Link State bits in DSTS register.
1268	 */
1269	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1270
1271	speed = reg & DWC3_DSTS_CONNECTSPD;
1272	if (speed == DWC3_DSTS_SUPERSPEED) {
1273		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1274		ret = -EINVAL;
1275		goto out;
1276	}
1277
1278	link_state = DWC3_DSTS_USBLNKST(reg);
1279
1280	switch (link_state) {
1281	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1282	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1283		break;
1284	default:
1285		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1286				link_state);
1287		ret = -EINVAL;
1288		goto out;
1289	}
1290
1291	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1292	if (ret < 0) {
1293		dev_err(dwc->dev, "failed to put link in Recovery\n");
1294		goto out;
1295	}
1296
1297	/* write zeroes to Link Change Request */
1298	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1299	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1300
1301	/* poll until Link State changes to ON */
1302	timeout = jiffies + msecs_to_jiffies(100);
1303
1304	while (!time_after(jiffies, timeout)) {
1305		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1306
1307		/* in HS, means ON */
1308		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1309			break;
1310	}
1311
1312	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1313		dev_err(dwc->dev, "failed to send remote wakeup\n");
1314		ret = -EINVAL;
1315	}
1316
1317out:
1318	spin_unlock_irqrestore(&dwc->lock, flags);
1319
1320	return ret;
1321}
1322
1323static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1324		int is_selfpowered)
1325{
1326	struct dwc3		*dwc = gadget_to_dwc(g);
1327
1328	dwc->is_selfpowered = !!is_selfpowered;
1329
1330	return 0;
1331}
1332
1333static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1334{
1335	u32			reg;
1336	u32			timeout = 500;
1337
1338	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1339	if (is_on) {
1340		reg &= ~DWC3_DCTL_TRGTULST_MASK;
1341		reg |= (DWC3_DCTL_RUN_STOP
1342				| DWC3_DCTL_TRGTULST_RX_DET);
1343	} else {
1344		reg &= ~DWC3_DCTL_RUN_STOP;
1345	}
1346
1347	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1348
1349	do {
1350		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1351		if (is_on) {
1352			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1353				break;
1354		} else {
1355			if (reg & DWC3_DSTS_DEVCTRLHLT)
1356				break;
1357		}
1358		timeout--;
1359		if (!timeout)
1360			break;
1361		udelay(1);
1362	} while (1);
1363
1364	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1365			dwc->gadget_driver
1366			? dwc->gadget_driver->function : "no-function",
1367			is_on ? "connect" : "disconnect");
1368}
1369
1370static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1371{
1372	struct dwc3		*dwc = gadget_to_dwc(g);
1373	unsigned long		flags;
1374
1375	is_on = !!is_on;
1376
1377	spin_lock_irqsave(&dwc->lock, flags);
1378	dwc3_gadget_run_stop(dwc, is_on);
1379	spin_unlock_irqrestore(&dwc->lock, flags);
1380
1381	return 0;
1382}
1383
/**
 * dwc3_gadget_start - usb_gadget_ops::udc_start entry point
 * @g: our gadget
 * @driver: the gadget driver being bound
 *
 * Binds @driver, programs the maximum speed into DCFG, enables both
 * directions of ep0 with a SuperSpeed-default (512 byte) descriptor and
 * arms ep0 for the first SETUP packet.
 *
 * Returns 0 on success, -EBUSY if a driver is already bound, or the
 * error from enabling ep0.
 */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	struct dwc3_ep		*dep;
	unsigned long		flags;
	int			ret = 0;
	u32			reg;

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err0;
	}

	dwc->gadget_driver	= driver;
	dwc->gadget.dev.driver	= &driver->driver;

	/* program the fastest speed we may run at */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);
	reg |= dwc->maximum_speed;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	/* undo the ep0-OUT enable from above */
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1446
1447static int dwc3_gadget_stop(struct usb_gadget *g,
1448		struct usb_gadget_driver *driver)
1449{
1450	struct dwc3		*dwc = gadget_to_dwc(g);
1451	unsigned long		flags;
1452
1453	spin_lock_irqsave(&dwc->lock, flags);
1454
1455	__dwc3_gadget_ep_disable(dwc->eps[0]);
1456	__dwc3_gadget_ep_disable(dwc->eps[1]);
1457
1458	dwc->gadget_driver	= NULL;
1459	dwc->gadget.dev.driver	= NULL;
1460
1461	spin_unlock_irqrestore(&dwc->lock, flags);
1462
1463	return 0;
1464}
/* Gadget-level operations exported to the UDC core. */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};
1473
1474/* -------------------------------------------------------------------------- */
1475
/**
 * dwc3_gadget_init_endpoints - allocate and wire up all endpoints
 * @dwc: pointer to our context structure
 *
 * Allocates one dwc3_ep per hardware endpoint, names them "epNin"/"epNout"
 * (even numbers are OUT, odd are IN), installs the proper ops and links
 * non-control endpoints onto the gadget's ep_list. TRB pools are only
 * allocated for non-control endpoints.
 *
 * NOTE(review): on a mid-loop failure, previously allocated endpoints are
 * not freed here — confirm the caller's error path invokes
 * dwc3_gadget_free_endpoints() (and that it tolerates partial init).
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dwc->eps[epnum] = dep;

		/* even = OUT, odd = IN; two epnums per logical endpoint */
		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");
		dep->endpoint.name = dep->name;
		dep->direction = (epnum & 1);

		if (epnum == 0 || epnum == 1) {
			/* ep0 (both directions) is control-only */
			dep->endpoint.maxpacket = 512;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int		ret;

			dep->endpoint.maxpacket = 1024;
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}
1525
/**
 * dwc3_gadget_free_endpoints - undo dwc3_gadget_init_endpoints()
 * @dwc: pointer to our context structure
 *
 * Frees TRB pools, unlinks non-control endpoints from the gadget's
 * ep_list and releases the per-endpoint allocations.
 *
 * NOTE(review): dwc3_free_trb_pool() is called for ep0/ep1 as well, even
 * though init only allocates pools for epnum >= 2 — confirm it tolerates
 * an endpoint without a pool.
 */
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		dwc3_free_trb_pool(dep);

		/* ep0 in/out were never put on the gadget's ep_list */
		if (epnum != 0 && epnum != 1)
			list_del(&dep->endpoint.ep_list);

		kfree(dep);
	}
}
1541
/* Release callback for the gadget device; nothing to free, just trace. */
static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}
1546
1547/* -------------------------------------------------------------------------- */
/**
 * dwc3_cleanup_done_reqs - give back requests completed by the hardware
 * @dwc: pointer to our context structure
 * @dep: the endpoint whose event we're handling
 * @event: the transfer event that triggered this cleanup
 * @status: completion status to report (0, -ECONNRESET, -ESHUTDOWN, ...)
 *
 * Walks dep->req_queued from the front, computing each request's actual
 * length from the TRB's remaining-size field, until a TRB that marks the
 * end of this event (short packet, LST or IOC) is reached.
 *
 * Returns 0 when the transfer is still in progress (IOC seen), 1 when
 * the endpoint's BUSY state should be cleared by the caller.
 */
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		*trb;
	unsigned int		count;
	unsigned int		s_pkt = 0;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			/* event without a queued request: should not happen */
			WARN_ON_ONCE(1);
			return 1;
		}

		trb = req->trb;

		if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
			/*
			 * We continue despite the error. There is not much we
			 * can do. If we don't clean it up we loop forever. If
			 * we skip the TRB then it gets overwritten after a
			 * while since we use them in a ring buffer. A BUG()
			 * would help. Lets hope that if this occurs, someone
			 * fixes the root cause instead of looking away :)
			 */
			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
					dep->name, req->trb);
		/* bytes the HW did NOT transfer for this TRB */
		count = trb->size & DWC3_TRB_SIZE_MASK;

		if (dep->direction) {
			/* IN: a residue means the HW didn't send everything */
			if (count) {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			/* OUT: a residue plus SHORT status is a short packet */
			if (count && (event->status & DEPEVT_STATUS_SHORT))
				s_pkt = 1;
		}

		/*
		 * We assume here we will always receive the entire data block
		 * which we should receive. Meaning, if we program RX to
		 * receive 4K but we receive only 2K, we assume that's all we
		 * should receive and we simply bounce the request back to the
		 * gadget driver for further processing.
		 */
		req->request.actual += req->request.length - count;
		dwc3_gadget_giveback(dep, req, status);
		if (s_pkt)
			break;
		if ((event->status & DEPEVT_STATUS_LST) &&
				(trb->ctrl & DWC3_TRB_CTRL_LST))
			break;
		if ((event->status & DEPEVT_STATUS_IOC) &&
				(trb->ctrl & DWC3_TRB_CTRL_IOC))
			break;
	} while (1);

	/* `trb` here refers to the last request processed in the loop above */
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 0;
	return 1;
}
1613
/**
 * dwc3_endpoint_transfer_complete - handle XferComplete/XferInProgress
 * @dwc: pointer to our context structure
 * @dep: the endpoint the event belongs to
 * @event: the transfer event
 * @start_new: unused here; distinguishes XferComplete from XferInProgress
 *             at the call sites
 *
 * Gives back completed requests and, when the cleanup says so, clears the
 * endpoint's BUSY state and its resource/transfer index.
 */
static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned		status = 0;
	int			clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy) {
		dep->flags &= ~DWC3_EP_BUSY;
		dep->res_trans_idx = 0;
	}

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32		reg;
		int		i;

		/* only restore U1/U2 once no endpoint has work queued */
		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			struct dwc3_ep	*dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		/* re-enable the U1/U2 bits saved by the 1st half */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}
1655
1656static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1657		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1658{
1659	u32 uf, mask;
1660
1661	if (list_empty(&dep->request_list)) {
1662		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1663			dep->name);
1664		return;
1665	}
1666
1667	mask = ~(dep->interval - 1);
1668	uf = event->parameters & mask;
1669	/* 4 micro frames in the future */
1670	uf += dep->interval * 4;
1671
1672	__dwc3_gadget_kick_transfer(dep, uf, 1);
1673}
1674
/**
 * dwc3_process_ep_cmd_complete - handle End Transfer command completion
 * @dep: the endpoint the command was issued on
 * @event: the command-complete event
 *
 * Reclaims every request that was started on this endpoint by forcing a
 * cleanup pass with -ESHUTDOWN and a synthesized "last" status.
 */
static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	/* local copy so we can fake the status without touching the event */
	struct dwc3_event_depevt mod_ev = *event;

	/*
	 * We were asked to remove one request. It is possible that this
	 * request and a few others were started together and have the same
	 * transfer index. Since we stopped the complete endpoint we don't
	 * know how many requests were already completed (and not yet)
	 * reported and how many could still be completed (later). We purge
	 * them all until the end of the list.
	 */
	mod_ev.status = DEPEVT_STATUS_LST;
	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
	dep->flags &= ~DWC3_EP_BUSY;
	/* pending requests are ignored and are queued on XferNotReady */
}
1694
1695static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1696		const struct dwc3_event_depevt *event)
1697{
1698	u32 param = event->parameters;
1699	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1700
1701	switch (cmd_type) {
1702	case DWC3_DEPCMD_ENDTRANSFER:
1703		dwc3_process_ep_cmd_complete(dep, event);
1704		break;
1705	case DWC3_DEPCMD_STARTTRANSFER:
1706		dep->res_trans_idx = param & 0x7f;
1707		break;
1708	default:
1709		printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
1710				__func__, cmd_type);
1711		break;
1712	};
1713}
1714
/**
 * dwc3_endpoint_interrupt - dispatch an endpoint-specific event
 * @dwc: pointer to our context structure
 * @event: the endpoint event to handle
 *
 * ep0/ep1 events are delegated entirely to the ep0 state machine; events
 * for other endpoints are dispatched by event type, with transfer-type
 * sanity checks (XferComplete is invalid for isoc, XferInProgress only
 * valid for isoc, stream events only valid for bulk).
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		if (!usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			/* isoc transfers are only started on XferNotReady */
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_ep_cmd_compl(dep, event);
		break;
	}
}
1798
1799static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1800{
1801	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1802		spin_unlock(&dwc->lock);
1803		dwc->gadget_driver->disconnect(&dwc->gadget);
1804		spin_lock(&dwc->lock);
1805	}
1806}
1807
/**
 * dwc3_stop_active_transfer - issue an End Transfer command
 * @dwc: pointer to our context structure
 * @epnum: hardware endpoint number whose transfer should be ended
 *
 * Uses the resource/transfer index recorded at Start Transfer completion;
 * warns (and does nothing) if there is none, since that means no transfer
 * is active. Completion is reported via an EPCMDCMPLT event.
 */
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	WARN_ON(!dep->res_trans_idx);
	if (dep->res_trans_idx) {
		cmd = DWC3_DEPCMD_ENDTRANSFER;
		/* force resource release and request an IRQ on completion */
		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
		WARN_ON_ONCE(ret);
		dep->res_trans_idx = 0;
	}
}
1828
1829static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1830{
1831	u32 epnum;
1832
1833	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1834		struct dwc3_ep *dep;
1835
1836		dep = dwc->eps[epnum];
1837		if (!(dep->flags & DWC3_EP_ENABLED))
1838			continue;
1839
1840		dwc3_remove_requests(dwc, dep);
1841	}
1842}
1843
1844static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1845{
1846	u32 epnum;
1847
1848	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1849		struct dwc3_ep *dep;
1850		struct dwc3_gadget_ep_cmd_params params;
1851		int ret;
1852
1853		dep = dwc->eps[epnum];
1854
1855		if (!(dep->flags & DWC3_EP_STALL))
1856			continue;
1857
1858		dep->flags &= ~DWC3_EP_STALL;
1859
1860		memset(&params, 0, sizeof(params));
1861		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1862				DWC3_DEPCMD_CLEARSTALL, &params);
1863		WARN_ON_ONCE(ret);
1864	}
1865}
1866
/**
 * dwc3_gadget_disconnect_interrupt - handle a Disconnect device event
 * @dwc: pointer to our context structure
 *
 * Purges all started transfers, tells the gadget driver about the
 * disconnect and resets the connection bookkeeping (speed, pending-setup
 * flag, config state).
 */
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);
#if 0
	XXX
	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
	enable it before we can disable it.

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
#endif

	dwc3_stop_active_transfers(dwc);
	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
}
1890
1891static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1892{
1893	u32			reg;
1894
1895	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1896
1897	if (on)
1898		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1899	else
1900		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1901
1902	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1903}
1904
1905static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1906{
1907	u32			reg;
1908
1909	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1910
1911	if (on)
1912		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1913	else
1914		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1915
1916	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1917}
1918
/**
 * dwc3_gadget_reset_interrupt - handle a USB Reset device event
 * @dwc: pointer to our context structure
 *
 * Applies the <1.88a missing-disconnect-event workaround, powers both
 * PHYs, notifies the gadget driver if we were connected, clears any test
 * mode, purges transfers and stalls and resets the device address.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	/* after reset -> Default State */
	dwc->dev_state = DWC3_DEFAULT_STATE;

	/* Enable PHYs */
	dwc3_gadget_usb2_phy_power(dwc, true);
	dwc3_gadget_usb3_phy_power(dwc, true);

	/* a known speed means we were connected before this reset */
	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	/* leave any USB test mode */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
1980
/**
 * dwc3_update_ram_clk_sel - reprogram RAM clock selection on SuperSpeed
 * @dwc: pointer to our context structure
 * @speed: the DSTS connect-speed value just observed
 *
 * Only acts on SuperSpeed connections.
 *
 * NOTE(review): usb30_clock is a compile-time constant, so the
 * `!usb30_clock` early return is dead unless DWC3_GCTL_CLK_BUS is 0 —
 * confirm whether this was meant to be configurable.
 */
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS but I dunno why I would want to do
	 * this. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
2005
2006static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
2007{
2008	switch (speed) {
2009	case USB_SPEED_SUPER:
2010		dwc3_gadget_usb2_phy_power(dwc, false);
2011		break;
2012	case USB_SPEED_HIGH:
2013	case USB_SPEED_FULL:
2014	case USB_SPEED_LOW:
2015		dwc3_gadget_usb3_phy_power(dwc, false);
2016		break;
2017	}
2018}
2019
/**
 * dwc3_gadget_conndone_interrupt - handle a Connect Done device event
 * @dwc: pointer to our context structure
 *
 * Reads the negotiated speed from DSTS, updates the RAM clock selection,
 * sets ep0's max packet size and the gadget speed accordingly, suspends
 * the unused PHY and re-enables both directions of ep0 with the updated
 * descriptor.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_ep		*dep;
	int			ret;
	u32			reg;
	u8			speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	memset(&params, 0x00, sizeof(params));

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Disable unneeded PHY */
	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
2103
2104static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2105{
2106	dev_vdbg(dwc->dev, "%s\n", __func__);
2107
2108	/*
2109	 * TODO take core out of low power mode when that's
2110	 * implemented.
2111	 */
2112
2113	dwc->gadget_driver->resume(&dwc->gadget);
2114}
2115
/**
 * dwc3_gadget_linksts_change_interrupt - handle a Link Status Change event
 * @dwc: pointer to our context structure
 * @evtinfo: the event's info field; low bits carry the new link state
 *
 * Applies the first half of the <1.83a U1/U2 -> U0 throughput workaround
 * (temporarily masking the U1/U2 enable bits in DCTL, saving them in
 * dwc->u1u2 for the second half) and records the new link state.
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* save them only once; 2nd half restores */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	dwc->link_state = next;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
2171
/**
 * dwc3_gadget_interrupt - dispatch a device-level event
 * @dwc: pointer to our context structure
 * @event: the device event to handle
 *
 * Routes Disconnect/Reset/Connect-Done/Wakeup/Link-Status events to their
 * handlers; the remaining event types are only traced.
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
2210
2211static void dwc3_process_event_entry(struct dwc3 *dwc,
2212		const union dwc3_event *event)
2213{
2214	/* Endpoint IRQ, handle it and return early */
2215	if (event->type.is_devspec == 0) {
2216		/* depevt */
2217		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2218	}
2219
2220	switch (event->type.type) {
2221	case DWC3_EVENT_TYPE_DEV:
2222		dwc3_gadget_interrupt(dwc, &event->devt);
2223		break;
2224	/* REVISIT what to do with Carkit and I2C events ? */
2225	default:
2226		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2227	}
2228}
2229
/**
 * dwc3_process_event_buf - drain one hardware event buffer
 * @dwc: pointer to our context structure
 * @buf: index of the event buffer to drain
 *
 * Reads the pending-byte count from GEVNTCOUNT, processes events 4 bytes
 * at a time and acknowledges each one back to the hardware so it can
 * reuse the slot.
 *
 * Returns IRQ_HANDLED if any events were processed, IRQ_NONE otherwise.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	int left;
	u32 count;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt = dwc->ev_buffs[buf];
	left = count;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);
		/*
		 * XXX we wrap around correctly to the next entry as almost all
		 * entries are 4 bytes in size. There is one entry which has 12
		 * bytes which is a regular entry followed by 8 bytes data. ATM
		 * I don't know how things are organized if were get next to the
		 * a boundary so I worry about that once we try to handle that.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		/* acknowledge 4 consumed bytes to the hardware */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	return IRQ_HANDLED;
}
2265
2266static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2267{
2268	struct dwc3			*dwc = _dwc;
2269	int				i;
2270	irqreturn_t			ret = IRQ_NONE;
2271
2272	spin_lock(&dwc->lock);
2273
2274	for (i = 0; i < dwc->num_event_buffers; i++) {
2275		irqreturn_t status;
2276
2277		status = dwc3_process_event_buf(dwc, i);
2278		if (status == IRQ_HANDLED)
2279			ret = status;
2280	}
2281
2282	spin_unlock(&dwc->lock);
2283
2284	return ret;
2285}
2286
2287/**
2288 * dwc3_gadget_init - Initializes gadget related registers
2289 * @dwc: pointer to our controller context structure
2290 *
2291 * Returns 0 on success otherwise negative errno.
2292 */
int __devinit dwc3_gadget_init(struct dwc3 *dwc)
{
	u32					reg;
	int					ret;
	int					irq;

	/* DMA-coherent buffer backing the ep0 control request */
	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	/* single TRB dedicated to ep0 transfers */
	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	/* two setup-packet slots (hence the "* 2") */
	dwc->setup_buf = dma_alloc_coherent(dwc->dev,
			sizeof(*dwc->setup_buf) * 2,
			&dwc->setup_buf_addr, GFP_KERNEL);
	if (!dwc->setup_buf) {
		dev_err(dwc->dev, "failed to allocate setup buffer\n");
		ret = -ENOMEM;
		goto err2;
	}

	/*
	 * 512-byte bounce buffer for ep0; the size literal must stay in
	 * sync with the dma_free_coherent() calls in the unwind path and
	 * in dwc3_gadget_exit().
	 */
	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			512, &dwc->ep0_bounce_addr, GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dev_set_name(&dwc->gadget.dev, "gadget");

	dwc->gadget.ops			= &dwc3_gadget_ops;
	dwc->gadget.max_speed		= USB_SPEED_SUPER;
	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
	dwc->gadget.dev.parent		= dwc->dev;
	dwc->gadget.sg_supported	= true;

	/* gadget device inherits DMA capabilities from the parent device */
	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);

	dwc->gadget.dev.dma_parms	= dwc->dev->dma_parms;
	dwc->gadget.dev.dma_mask	= dwc->dev->dma_mask;
	dwc->gadget.dev.release		= dwc3_gadget_release;
	dwc->gadget.name		= "dwc3-gadget";

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err4;

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
			"dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err5;
	}

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);
	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);

	ret = device_register(&dwc->gadget.dev);
	if (ret) {
		dev_err(dwc->dev, "failed to register gadget device\n");
		/* device_register() failure requires a put_device() */
		put_device(&dwc->gadget.dev);
		goto err6;
	}

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err7;
	}

	return 0;

	/* unwind in strict reverse order of the setup above */
err7:
	device_unregister(&dwc->gadget.dev);

err6:
	/* mask all device IRQs before releasing the handler */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

err5:
	dwc3_gadget_free_endpoints(dwc);

err4:
	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

err3:
	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}
2422
/*
 * dwc3_gadget_exit - tear down everything dwc3_gadget_init() set up,
 * in strict reverse order: UDC, IRQ, endpoints, DMA buffers, then the
 * gadget device itself.
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	int			irq;

	usb_del_gadget_udc(&dwc->gadget);
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	/* mask all device IRQs before releasing the handler */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

	dwc3_gadget_free_endpoints(dwc);

	/* 512 must match the ep0_bounce allocation size in init */
	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

	device_unregister(&dwc->gadget.dev);
}
2449