mv_udc_core.c revision 5e6c86b017691230b6b47f19b7d5449997e8a0b8
/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *	   Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/system.h>
#include <asm/unaligned.h>

#include "mv_udc.h"

#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
				((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000
#define DTD_TIMEOUT		1000

#define LOOPS_USEC_SHIFT	4
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
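
/*
 * Every busy-wait loop below runs LOOPS(timeout) iterations with a
 * udelay(LOOPS_USEC) per iteration, so the total wait approximates the
 * timeout in microseconds.  For example, LOOPS(RESET_TIMEOUT) is
 * 10000 >> 4 = 625 iterations of 16 usec each, i.e. roughly 10 msec.
 */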

static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

/* controller device global variable */
static struct mv_udc	*the_controller;
int mv_usb_otgsc;

static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);

/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};

static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}

/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
	u32	epctrlx;

	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}

static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	td_complete = 0;
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					dev_dbg(&udc->dev->dev,
						"TX dTD has remaining data\n");
					retval = -EPROTO;
					break;
				} else
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
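
/*
 * process_ep_req() return contract: 0 means the whole dTD chain for the
 * request completed and req.actual has been updated; 1 means a dTD is
 * still active; a negative errno reports a transfer error.  The caller
 * (irq_process_tr_complete) treats any nonzero value as "stop scanning
 * this endpoint's queue".
 */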

/*
 * done() - retire a request; caller has blocked irqs
 * @status : request status to be set, only takes effect when
 * the request is still in progress.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Remove the req from ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	if (req->mapped) {
		dma_unmap_single(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	} else
		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);
	/*
	 * complete() is from gadget layer,
	 * eg fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}

static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		wmb();

		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared.  When the hardware sees a hazard, it
			 * clears the bit; otherwise it remains set and we
			 * can proceed with priming the endpoint if it is
			 * not already primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
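
/*
 * The ATDTW (add-dTD tripwire) dance above is the append protocol for
 * this dual-role controller IP: when adding to a non-empty queue,
 * software sets the tripwire, samples ENDPTSTATUS, and only trusts the
 * sample if the tripwire is still set afterwards, because the hardware
 * clears it whenever it touched the endpoint concurrently.  If the
 * sampled status shows the endpoint idle, the code falls through and
 * re-primes it below.
 */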

static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	u32 temp;
	struct mv_dtd *dtd;
	struct mv_udc *udc;

	/* how big will this transfer be? */
	*length = min(req->req.length - req->req.actual,
			(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no __GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	dtd->size_ioc_sts = temp;

	mb();

	return dtd;
}
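
/*
 * A dTD carries five buffer page pointers: the first points into the
 * current offset of the request buffer, the remaining four to the
 * following 4 KB page boundaries.  EP_MAX_LENGTH_TRANSFER (defined in
 * mv_udc.h) caps *length so a single dTD, offset included, always fits
 * inside those five pages; req_to_dtd() below chains as many dTDs as
 * the request needs.
 */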

/* generate dTD linked list for a request */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	struct mv_udc *udc;
	dma_addr_t dma;

	udc = req->ep->udc;

	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;

	req->tail = dtd;

	return 0;
}
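
/*
 * Resulting layout for a three-dTD request:
 *
 *   req->head -> dTD0 -> dTD1 -> dTD2 (= req->tail)
 *                                  \
 *                                   dtd_next = DTD_NEXT_TERMINATE
 *
 * dtd_next holds the DMA address of the next dTD for the hardware,
 * while next_dtd_virt keeps the matching virtual pointer for software
 * traversal in process_ep_req() and done().
 */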

static int mv_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u16 max = 0;
	u32 bit_pos, epctrlx, direction;
	unsigned char zlt = 0, ios = 0, mult = 0;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (!_ep || !desc || ep->desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);

	/*
	 * disable HW zero length termination select
	 * driver handles zero length packet through req->req.zero
	 */
	zlt = 1;

	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),
			(unsigned)bit_pos);
		goto en_done;
	}
	/* Set the max packet length, interrupt on Setup and Mult fields */
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		zlt = 1;
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		ios = 1;
		/* fall through */
	case USB_ENDPOINT_XFER_INT:
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
		max = max & 0x7ff;	/* bit 0~10 */
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ? EP_QUEUE_HEAD_IOS : 0);
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;

	ep->ep.maxpacket = max;
	ep->desc = desc;
	ep->stopped = 0;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
	} else {
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/*
	 * Implement Guideline (GL# USB-7) The unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
en_done:
	return -EINVAL;
}

static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct mv_req *req = NULL;

	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_req *req = NULL;

	req = container_of(_req, struct mv_req, req);

	if (_req)
		kfree(req);
}

static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					" bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
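
/*
 * Flush protocol: writing a bit to ENDPTFLUSH retires the associated
 * primed buffer, and the controller clears the bit again once the
 * flush finishes.  Because a transfer can be (re)primed while a flush
 * is in flight, the outer loop re-checks ENDPTSTATUS and retries until
 * the endpoint is really idle or the timeout expires.
 */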

/* queues (submits) an I/O request to an endpoint */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc = ep->udc;
	unsigned long flags;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}
	if (unlikely(!_ep || !ep->desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
		return -EINVAL;
	}
	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
					req->req.buf,
					req->req.length, ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
					req->req.dma, req->req.length,
					ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		int retval;
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			return retval;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -ENOMEM;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}

/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
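
/*
 * Dequeue summary: the endpoint is temporarily disabled so the
 * controller cannot advance the queue while the request is unlinked.
 * If the victim sits at the head of the queue, the hardware context
 * (dQH) is repointed to the next request, or reset if none remains;
 * if it sits in the middle, the previous dTD chain is patched to skip
 * over it.
 */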

static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (stall) {
		if (direction == EP_DIR_IN)
			epctrlx |= EPCTRL_TX_EP_STALL;
		else
			epctrlx |= EPCTRL_RX_EP_STALL;
	} else {
		if (direction == EP_DIR_IN) {
			epctrlx &= ~EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
		} else {
			epctrlx &= ~EPCTRL_RX_EP_STALL;
			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
		}
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
}

static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_OUT)
		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
	else
		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
}

static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_ep *ep;
	unsigned long flags = 0;
	int status = 0;
	struct mv_udc *udc;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;
	if (!_ep || !ep->desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * An attempt to halt an IN ep will fail if any transfer
	 * requests are still queued
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;
	}
out:
	return status;
}

static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}

static struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};

static void udc_clock_enable(struct mv_udc *udc)
{
	unsigned int i;

	for (i = 0; i < udc->clknum; i++)
		clk_enable(udc->clk[i]);
}

static void udc_clock_disable(struct mv_udc *udc)
{
	unsigned int i;

	for (i = 0; i < udc->clknum; i++)
		clk_disable(udc->clk[i]);
}

static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Clear the Run/Stop bit in the command register to stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);
}

static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}

static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}
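
/*
 * Initialization order udc_reset() relies on: the controller must be
 * halted before USBCMD_CTRL_RESET is set, and mode selection, the
 * endpoint list address (the dQH array) and the PORTSC tuning must all
 * be programmed before udc_start() sets the Run/Stop bit again.
 */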

static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	udc_clock_enable(udc);
	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;

	return 0;
}

static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}

static void mv_udc_disable_internal(struct mv_udc *udc)
{
	if (udc->active) {
		dev_dbg(&udc->dev->dev, "disable udc\n");
		if (udc->pdata->phy_deinit)
			udc->pdata->phy_deinit(udc->phy_regs);
		udc_clock_disable(udc);
		udc->active = 0;
	}
}

static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}

static int mv_udc_get_frame(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	u16	retval;

	if (!gadget)
		return -ENODEV;

	udc = container_of(gadget, struct mv_udc, gadget);

	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

	return retval;
}

/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
	u32 portsc;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;

	portsc = readl(&udc->op_regs->portsc);
	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
		return 0;
	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);
	return 0;
}

static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* stop all the transfers in queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}

static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
			__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfers in queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}

static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int mv_udc_stop(struct usb_gadget_driver *driver);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.start		= mv_udc_start,
	.stop		= mv_udc_stop,
};

static int eps_init(struct mv_udc *udc)
{
	struct mv_ep	*ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
	ep->ep_num = 0;
	ep->desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
		}
		ep->udc = udc;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		ep->ep.maxpacket = (unsigned short) ~0;
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}

/* delete all endpoint requests, called with spinlock held */
static void nuke(struct mv_ep *ep, int status)
{
	/* called with spinlock held */
	ep->stopped = 1;

	/* endpoint fifo flush */
	mv_ep_fifo_flush(&ep->ep);

	while (!list_empty(&ep->queue)) {
		struct mv_req *req = NULL;
		req = list_entry(ep->queue.next, struct mv_req, queue);
		done(ep, req, status);
	}
}

/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct mv_udc *udc = the_controller;
	int retval = 0;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	retval = bind(&udc->gadget);
	if (retval) {
		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
				driver->driver.name, retval);
		udc->driver = NULL;
		udc->gadget.dev.driver = NULL;
		return retval;
	}

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			if (driver->unbind) {
				driver->unbind(&udc->gadget);
				udc->gadget.dev.driver = NULL;
				udc->driver = NULL;
			}
			return retval;
		}
	}

	/* pullup is always on */
	mv_udc_pullup(&udc->gadget, 1);

	/* When booting with a cable attached, no vbus irq will occur */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}

static int mv_udc_stop(struct usb_gadget_driver *driver)
{
	struct mv_udc *udc = the_controller;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	spin_lock_irqsave(&udc->lock, flags);

	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, driver);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	return 0;
}

static void mv_set_ptc(struct mv_udc *udc, u32 mode)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	portsc |= mode << 16;
	writel(portsc, &udc->op_regs->portsc[0]);
}

static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
	struct mv_udc *udc = the_controller;
	struct mv_req *req = container_of(_req, struct mv_req, req);
	unsigned long flags;

	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);

	spin_lock_irqsave(&udc->lock, flags);
	if (req->test_mode) {
		mv_set_ptc(udc, req->test_mode);
		req->test_mode = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}

static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req))
		retval = queue_dtd(ep, req);
	else {	/* no mem */
		retval = -ENOMEM;
		goto out;
	}

	if (retval) {
		dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	return retval;
}
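
/*
 * udc_prime_status() queues the pre-allocated udc->status_req on ep0,
 * either as a zero-length status handshake (empty == true) or as the
 * 2-byte payload of a GET_STATUS data phase (empty == false).  Callers
 * stall ep0 when it fails, since a control transfer cannot be left
 * half-finished.
 */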

static void mv_udc_testmode(struct mv_udc *udc, u16 index)
{
	if (index <= TEST_FORCE_EN) {
		udc->test_mode = index;
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
	} else
		dev_err(&udc->dev->dev,
			"This test mode (%d) is not supported\n", index);
}

static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}

static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	u16 status = 0;
	int retval;

	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
		!= (USB_DIR_IN | USB_TYPE_STANDARD))
		return;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
		status = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		u8 ep_num, direction;

		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, ep_num, direction)
				<< USB_ENDPOINT_HALT;
	}

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
	if (retval)
		ep0_stall(udc);
	else
		udc->ep0_state = DATA_STATE_XMIT;
}

static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}

static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}

static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	bool delegate = false;

	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate the remaining requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					?  EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					?  DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
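
/*
 * ep0 state machine, driven from here and from ep0_req_complete():
 *
 *   WAIT_FOR_SETUP      -> DATA_STATE_XMIT/RECV (request has a data phase)
 *   WAIT_FOR_SETUP      -> WAIT_FOR_OUT_STATUS  (no data phase)
 *   DATA_STATE_*        -> WAIT_FOR_OUT_STATUS  (status phase primed)
 *   WAIT_FOR_OUT_STATUS -> WAIT_FOR_SETUP       (status phase complete)
 */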

/* complete DATA or STATUS phase of ep0, prime the status phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}

static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* a hazard exists while a setup packet is arriving */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
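
/*
 * The setup tripwire guards against the controller overwriting the
 * 8-byte setup buffer while it is being copied: if a new SETUP packet
 * lands during the memcpy, the hardware clears
 * USBCMD_SETUP_TRIPWIRE_SET and the loop copies the fresh packet
 * instead.
 */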

static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an incomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}

void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The address occupies bits 25-31.  Clear the device address */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime)) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc));

		/*
		 * re-initialize
		 * controller reset
		 */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
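
/*
 * Two reset flavours are distinguished above: while PORTSCX_PORT_RESET
 * is still asserted, the bus reset is in progress and quiescing the
 * queues is enough; if it has already cleared, the window was missed,
 * so the controller is fully re-initialized (udc_reset, ep0_reset,
 * udc_start) before the next enumeration attempt.
 */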

static void handle_bus_resume(struct mv_udc *udc)
{
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;

	/* report resume to the driver */
	if (udc->driver) {
		if (udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}
}

static void irq_process_suspend(struct mv_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}

static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}

static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupt sources that occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
2056
2057static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2058{
2059	struct mv_udc *udc = (struct mv_udc *)dev;
2060
2061	/* polling VBUS and init phy may cause too much time*/
2062	if (udc->qwork)
2063		queue_work(udc->qwork, &udc->vbus_work);
2064
2065	return IRQ_HANDLED;
2066}
2067
2068static void mv_udc_vbus_work(struct work_struct *work)
2069{
2070	struct mv_udc *udc;
2071	unsigned int vbus;
2072
2073	udc = container_of(work, struct mv_udc, vbus_work);
2074	if (!udc->pdata->vbus)
2075		return;
2076
2077	vbus = udc->pdata->vbus->poll();
2078	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2079
2080	if (vbus == VBUS_HIGH)
2081		mv_udc_vbus_session(&udc->gadget, 1);
2082	else if (vbus == VBUS_LOW)
2083		mv_udc_vbus_session(&udc->gadget, 0);
2084}
2085
2086/* release device structure */
2087static void gadget_release(struct device *_dev)
2088{
2089	struct mv_udc *udc = the_controller;
2090
2091	complete(udc->done);
2092}
2093
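/*
 * Tear down everything mv_udc_probe() set up, in reverse order, then
 * wait for gadget_release() to signal that the gadget device is gone
 * before freeing the udc structure itself.
 */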
2094static int __devexit mv_udc_remove(struct platform_device *dev)
2095{
2096	struct mv_udc *udc = the_controller;
2097	int clk_i;
2098
2099	usb_del_gadget_udc(&udc->gadget);
2100
2101	if (udc->qwork) {
2102		flush_workqueue(udc->qwork);
2103		destroy_workqueue(udc->qwork);
2104	}
2105
	/*
	 * If a transceiver has been initialized, the vbus irq was never
	 * requested by the udc driver, so there is nothing to free here.
	 */
2110	if (udc->pdata && udc->pdata->vbus
2111		&& udc->clock_gating && udc->transceiver == NULL)
		free_irq(udc->pdata->vbus->irq, udc);
2113
2114	/* free memory allocated in probe */
2115	if (udc->dtd_pool)
2116		dma_pool_destroy(udc->dtd_pool);
2117
2118	if (udc->ep_dqh)
2119		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2120			udc->ep_dqh, udc->ep_dqh_dma);
2121
2122	kfree(udc->eps);
2123
2124	if (udc->irq)
		free_irq(udc->irq, udc);
2126
2127	mv_udc_disable(udc);
2128
2129	if (udc->cap_regs)
2130		iounmap(udc->cap_regs);
2131
2132	if (udc->phy_regs)
2133		iounmap(udc->phy_regs);
2134
2135	if (udc->status_req) {
2136		kfree(udc->status_req->req.buf);
2137		kfree(udc->status_req);
2138	}
2139
	for (clk_i = 0; clk_i < udc->clknum; clk_i++)
2141		clk_put(udc->clk[clk_i]);
2142
2143	device_unregister(&udc->gadget.dev);
2144
2145	/* free dev, wait for the release() finished */
2146	wait_for_completion(udc->done);
2147	kfree(udc);
2148
2149	the_controller = NULL;
2150
2151	return 0;
2152}
2153
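/*
 * Probe sequence: map the capability and PHY registers, enable clocks,
 * allocate the dQH area and dTD pool, set up endpoint and gadget data,
 * request the interrupts, and finally register with the UDC core.
 */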
2154static int __devinit mv_udc_probe(struct platform_device *dev)
2155{
2156	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
2157	struct mv_udc *udc;
2158	int retval = 0;
2159	int clk_i = 0;
2160	struct resource *r;
2161	size_t size;
2162
2163	if (pdata == NULL) {
2164		dev_err(&dev->dev, "missing platform_data\n");
2165		return -ENODEV;
2166	}
2167
2168	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
2169	udc = kzalloc(size, GFP_KERNEL);
2170	if (udc == NULL) {
2171		dev_err(&dev->dev, "failed to allocate memory for udc\n");
2172		return -ENOMEM;
2173	}
2174
2175	the_controller = udc;
2176	udc->done = &release_done;
2177	udc->pdata = dev->dev.platform_data;
2178	spin_lock_init(&udc->lock);
2179
2180	udc->dev = dev;
2181
2182#ifdef CONFIG_USB_OTG_UTILS
2183	if (pdata->mode == MV_USB_MODE_OTG)
2184		udc->transceiver = otg_get_transceiver();
2185#endif
2186
2187	udc->clknum = pdata->clknum;
2188	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
2189		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
2190		if (IS_ERR(udc->clk[clk_i])) {
2191			retval = PTR_ERR(udc->clk[clk_i]);
2192			goto err_put_clk;
2193		}
2194	}
2195
2196	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2197	if (r == NULL) {
2198		dev_err(&dev->dev, "no I/O memory resource defined\n");
2199		retval = -ENODEV;
2200		goto err_put_clk;
2201	}
2202
2203	udc->cap_regs = (struct mv_cap_regs __iomem *)
2204		ioremap(r->start, resource_size(r));
2205	if (udc->cap_regs == NULL) {
2206		dev_err(&dev->dev, "failed to map I/O memory\n");
2207		retval = -EBUSY;
2208		goto err_put_clk;
2209	}
2210
2211	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2212	if (r == NULL) {
2213		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
2214		retval = -ENODEV;
2215		goto err_iounmap_capreg;
2216	}
2217
2218	udc->phy_regs = ioremap(r->start, resource_size(r));
2219	if (udc->phy_regs == NULL) {
2220		dev_err(&dev->dev, "failed to map phy I/O memory\n");
2221		retval = -EBUSY;
2222		goto err_iounmap_capreg;
2223	}
2224
	/* we will access controller registers, so enable the clk */
2226	retval = mv_udc_enable_internal(udc);
2227	if (retval)
2228		goto err_iounmap_phyreg;
2229
2230	udc->op_regs =
2231		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2232		+ (readl(&udc->cap_regs->caplength_hciversion)
2233			& CAPLENGTH_MASK));
2234	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2235
	/*
	 * Some platforms use USB to download the kernel image and may not
	 * disconnect the gadget before booting, so stop the udc first.
	 */
2240	udc_stop(udc);
2241	writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2242
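	/*
	 * The controller needs one queue head per endpoint and direction,
	 * hence max_eps * 2 entries, and the area must be aligned to
	 * DQH_ALIGNMENT.
	 */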
	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
2244	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2245	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
2246					&udc->ep_dqh_dma, GFP_KERNEL);
2247
2248	if (udc->ep_dqh == NULL) {
2249		dev_err(&dev->dev, "allocate dQH memory failed\n");
2250		retval = -ENOMEM;
2251		goto err_disable_clock;
2252	}
2253	udc->ep_dqh_size = size;
2254
2255	/* create dTD dma_pool resource */
2256	udc->dtd_pool = dma_pool_create("mv_dtd",
2257			&dev->dev,
2258			sizeof(struct mv_dtd),
2259			DTD_ALIGNMENT,
2260			DMA_BOUNDARY);
2261
2262	if (!udc->dtd_pool) {
2263		retval = -ENOMEM;
2264		goto err_free_dma;
2265	}
2266
	size = udc->max_eps * sizeof(struct mv_ep) * 2;
2268	udc->eps = kzalloc(size, GFP_KERNEL);
2269	if (udc->eps == NULL) {
2270		dev_err(&dev->dev, "allocate ep memory failed\n");
2271		retval = -ENOMEM;
2272		goto err_destroy_dma;
2273	}
2274
2275	/* initialize ep0 status request structure */
2276	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2277	if (!udc->status_req) {
2278		dev_err(&dev->dev, "allocate status_req memory failed\n");
2279		retval = -ENOMEM;
2280		goto err_free_eps;
2281	}
2282	INIT_LIST_HEAD(&udc->status_req->queue);
2283
2284	/* allocate a small amount of memory to get valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	if (!udc->status_req->req.buf) {
		retval = -ENOMEM;
		goto err_free_status_req;
	}
	udc->status_req->req.dma = DMA_ADDR_INVALID;
2287
2288	udc->resume_state = USB_STATE_NOTATTACHED;
2289	udc->usb_state = USB_STATE_POWERED;
2290	udc->ep0_dir = EP_DIR_OUT;
2291	udc->remote_wakeup = 0;
2292
2293	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2294	if (r == NULL) {
2295		dev_err(&dev->dev, "no IRQ resource defined\n");
2296		retval = -ENODEV;
2297		goto err_free_status_req;
2298	}
2299	udc->irq = r->start;
	retval = request_irq(udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc);
	if (retval) {
		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		goto err_free_status_req;
	}
2307
2308	/* initialize gadget structure */
2309	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
2310	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
2311	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
2312	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
2313	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */
2314
2315	/* the "gadget" abstracts/virtualizes the controller */
2316	dev_set_name(&udc->gadget.dev, "gadget");
2317	udc->gadget.dev.parent = &dev->dev;
2318	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2319	udc->gadget.dev.release = gadget_release;
2320	udc->gadget.name = driver_name;		/* gadget name */
2321
2322	retval = device_register(&udc->gadget.dev);
2323	if (retval)
2324		goto err_free_irq;
2325
2326	eps_init(udc);
2327
	/* With VBUS detection we can gate the clock and PHY on demand. */
2329	if (udc->transceiver)
2330		udc->clock_gating = 1;
2331	else if (pdata->vbus) {
2332		udc->clock_gating = 1;
2333		retval = request_threaded_irq(pdata->vbus->irq, NULL,
2334				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2335		if (retval) {
2336			dev_info(&dev->dev,
				"cannot request irq for VBUS, "
				"disabling clock gating\n");
2339			udc->clock_gating = 0;
2340		}
2341
2342		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2343		if (!udc->qwork) {
2344			dev_err(&dev->dev, "cannot create workqueue\n");
2345			retval = -ENOMEM;
2346			goto err_unregister;
2347		}
2348
2349		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2350	}
2351
	/*
	 * With clock gating the clock and PHY can be disabled now.
	 * Without it there is no VBUS detection, so the controller
	 * must be kept active the whole time.
	 */
2357	if (udc->clock_gating)
2358		mv_udc_disable_internal(udc);
2359	else
2360		udc->vbus_active = 1;
2361
2362	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
2363	if (retval)
2364		goto err_unregister;
2365
	dev_info(&dev->dev, "successfully probed UDC device %s clock gating\n",
		udc->clock_gating ? "with" : "without");
2368
2369	return 0;
2370
2371err_unregister:
2372	if (udc->pdata && udc->pdata->vbus
2373		&& udc->clock_gating && udc->transceiver == NULL)
		free_irq(pdata->vbus->irq, udc);
2375	device_unregister(&udc->gadget.dev);
2376err_free_irq:
	free_irq(udc->irq, udc);
2378err_free_status_req:
2379	kfree(udc->status_req->req.buf);
2380	kfree(udc->status_req);
2381err_free_eps:
2382	kfree(udc->eps);
2383err_destroy_dma:
2384	dma_pool_destroy(udc->dtd_pool);
2385err_free_dma:
2386	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2387			udc->ep_dqh, udc->ep_dqh_dma);
2388err_disable_clock:
2389	mv_udc_disable_internal(udc);
2390err_iounmap_phyreg:
2391	iounmap(udc->phy_regs);
2392err_iounmap_capreg:
2393	iounmap(udc->cap_regs);
2394err_put_clk:
2395	for (clk_i--; clk_i >= 0; clk_i--)
2396		clk_put(udc->clk[clk_i]);
2397	the_controller = NULL;
2398	kfree(udc);
2399	return retval;
2400}
2401
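/*
 * System sleep support.  With clock gating the controller is already
 * powered down whenever the cable is out, so only the always-powered
 * case needs an explicit stop here; suspending with a live cable is
 * refused so the host does not see the device vanish.
 */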
2402#ifdef CONFIG_PM
2403static int mv_udc_suspend(struct device *_dev)
2404{
2405	struct mv_udc *udc = the_controller;
2406
	/* if OTG is enabled, this is handled by the OTG driver */
2408	if (udc->transceiver)
2409		return 0;
2410
2411	if (udc->pdata->vbus && udc->pdata->vbus->poll)
2412		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2413			dev_info(&udc->dev->dev, "USB cable is connected!\n");
2414			return -EAGAIN;
2415		}
2416
	/*
	 * The udc may only suspend once the cable is unplugged, so the
	 * clock_gating == 1 case is already powered down and needs no
	 * handling here.
	 */
2421	if (!udc->clock_gating) {
2422		udc_stop(udc);
2423
2424		spin_lock_irq(&udc->lock);
2425		/* stop all usb activities */
2426		stop_activity(udc, udc->driver);
2427		spin_unlock_irq(&udc->lock);
2428
2429		mv_udc_disable_internal(udc);
2430	}
2431
2432	return 0;
2433}
2434
2435static int mv_udc_resume(struct device *_dev)
2436{
2437	struct mv_udc *udc = the_controller;
2438	int retval;
2439
	/* if OTG is enabled, this is handled by the OTG driver */
2441	if (udc->transceiver)
2442		return 0;
2443
2444	if (!udc->clock_gating) {
2445		retval = mv_udc_enable_internal(udc);
2446		if (retval)
2447			return retval;
2448
2449		if (udc->driver && udc->softconnect) {
2450			udc_reset(udc);
2451			ep0_reset(udc);
2452			udc_start(udc);
2453		}
2454	}
2455
2456	return 0;
2457}
2458
2459static const struct dev_pm_ops mv_udc_pm_ops = {
2460	.suspend	= mv_udc_suspend,
2461	.resume		= mv_udc_resume,
2462};
2463#endif
2464
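/*
 * On shutdown, drop the controller back to idle (the low two bits of
 * USBMODE select the controller mode) so it stops driving the bus
 * before a reboot or kexec.
 */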
2465static void mv_udc_shutdown(struct platform_device *dev)
2466{
2467	struct mv_udc *udc = the_controller;
2468	u32 mode;
2469
2470	/* reset controller mode to IDLE */
2471	mode = readl(&udc->op_regs->usbmode);
2472	mode &= ~3;
2473	writel(mode, &udc->op_regs->usbmode);
2474}
2475
2476static struct platform_driver udc_driver = {
2477	.probe		= mv_udc_probe,
	.remove		= __devexit_p(mv_udc_remove),
2479	.shutdown	= mv_udc_shutdown,
2480	.driver		= {
2481		.owner	= THIS_MODULE,
2482		.name	= "mv-udc",
2483#ifdef CONFIG_PM
2484		.pm	= &mv_udc_pm_ops,
2485#endif
2486	},
2487};
2488MODULE_ALIAS("platform:mv-udc");
2489
2490MODULE_DESCRIPTION(DRIVER_DESC);
2491MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2492MODULE_VERSION(DRIVER_VERSION);
2493MODULE_LICENSE("GPL");
2494
module_platform_driver(udc_driver);