mv_udc_core.c revision 309d6d2be42c895c424a5090fcc2e95ce2d8499a
1/*
2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3 * Author: Chao Xie <chao.xie@marvell.com>
4 *	   Neil Zhang <zhangwm@marvell.com>
5 *
6 * This program is free software; you can redistribute  it and/or modify it
7 * under  the terms of  the GNU General  Public License as published by the
8 * Free Software Foundation;  either version 2 of the  License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/dmapool.h>
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/ioport.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/timer.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/moduleparam.h>
27#include <linux/device.h>
28#include <linux/usb/ch9.h>
29#include <linux/usb/gadget.h>
30#include <linux/usb/otg.h>
31#include <linux/pm.h>
32#include <linux/io.h>
33#include <linux/irq.h>
34#include <linux/platform_device.h>
35#include <linux/clk.h>
36#include <linux/platform_data/mv_usb.h>
37#include <asm/system.h>
38#include <asm/unaligned.h>
39
40#include "mv_udc.h"
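/*
 * Programming model note: this controller follows the EHCI-derived
 * "dual-role" device-controller model (same register/descriptor layout
 * as ci13xxx/fsl_udc).  A coherent array of endpoint queue heads (dQH),
 * one per endpoint per direction, is installed at ENDPOINTLISTADDR;
 * each dQH links a chain of transfer descriptors (dTD) that the
 * controller walks via DMA.
 */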
41
42#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
43#define DRIVER_VERSION		"8 Nov 2010"
44
45#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
46				((ep)->udc->ep0_dir) : ((ep)->direction))
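/*
 * For ep0 the direction is not fixed: it tracks the current phase of the
 * control transfer (udc->ep0_dir).  All other endpoints get a fixed
 * direction assigned in eps_init().
 */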
47
48/* timeout value -- usec */
49#define RESET_TIMEOUT		10000
50#define FLUSH_TIMEOUT		10000
51#define EPSTATUS_TIMEOUT	10000
52#define PRIME_TIMEOUT		10000
53#define READSAFE_TIMEOUT	1000
54#define DTD_TIMEOUT		1000
55
56#define LOOPS_USEC_SHIFT	4
57#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
58#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
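/*
 * Busy-wait budget: LOOPS(t) iterations of udelay(LOOPS_USEC) spin for
 * roughly t microseconds, e.g. LOOPS(RESET_TIMEOUT) = 10000 >> 4 = 625
 * iterations of udelay(16).
 */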
59
60static DECLARE_COMPLETION(release_done);
61
62static const char driver_name[] = "mv_udc";
63static const char driver_desc[] = DRIVER_DESC;
64
65/* controller device global variable */
66static struct mv_udc	*the_controller;
67int mv_usb_otgsc;
68
69static void nuke(struct mv_ep *ep, int status);
70static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
71
72/* for endpoint 0 operations */
73static const struct usb_endpoint_descriptor mv_ep0_desc = {
74	.bLength =		USB_DT_ENDPOINT_SIZE,
75	.bDescriptorType =	USB_DT_ENDPOINT,
76	.bEndpointAddress =	0,
77	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
78	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
79};
80
81static void ep0_reset(struct mv_udc *udc)
82{
83	struct mv_ep *ep;
84	u32 epctrlx;
85	int i = 0;
86
87	/* ep0 in and out */
88	for (i = 0; i < 2; i++) {
89		ep = &udc->eps[i];
90		ep->udc = udc;
91
92		/* ep0 dQH */
93		ep->dqh = &udc->ep_dqh[i];
94
95		/* configure ep0 endpoint capabilities in dQH */
96		ep->dqh->max_packet_length =
97			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
98			| EP_QUEUE_HEAD_IOS;
99
100		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
101
102		epctrlx = readl(&udc->op_regs->epctrlx[0]);
103		if (i) {	/* TX */
104			epctrlx |= EPCTRL_TX_ENABLE
105				| (USB_ENDPOINT_XFER_CONTROL
106					<< EPCTRL_TX_EP_TYPE_SHIFT);
107
108		} else {	/* RX */
109			epctrlx |= EPCTRL_RX_ENABLE
110				| (USB_ENDPOINT_XFER_CONTROL
111					<< EPCTRL_RX_EP_TYPE_SHIFT);
112		}
113
114		writel(epctrlx, &udc->op_regs->epctrlx[0]);
115	}
116}
117
118/* protocol ep0 stall, will automatically be cleared on new transaction */
119static void ep0_stall(struct mv_udc *udc)
120{
121	u32	epctrlx;
122
123	/* set TX and RX to stall */
124	epctrlx = readl(&udc->op_regs->epctrlx[0]);
125	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
126	writel(epctrlx, &udc->op_regs->epctrlx[0]);
127
128	/* update ep0 state */
129	udc->ep0_state = WAIT_FOR_SETUP;
130	udc->ep0_dir = EP_DIR_OUT;
131}
132
133static int process_ep_req(struct mv_udc *udc, int index,
134	struct mv_req *curr_req)
135{
136	struct mv_dtd	*curr_dtd;
137	struct mv_dqh	*curr_dqh;
138	int td_complete, actual, remaining_length;
139	int i, direction;
140	int retval = 0;
141	u32 errors;
142	u32 bit_pos;
143
144	curr_dqh = &udc->ep_dqh[index];
145	direction = index % 2;
146
147	curr_dtd = curr_req->head;
148	td_complete = 0;
149	actual = curr_req->req.length;
150
151	for (i = 0; i < curr_req->dtd_count; i++) {
152		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
153			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
154				udc->eps[index].name);
155			return 1;
156		}
157
158		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
159		if (!errors) {
160			remaining_length =
161				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
162					>> DTD_LENGTH_BIT_POS;
163			actual -= remaining_length;
164
165			if (remaining_length) {
166				if (direction) {
167					dev_dbg(&udc->dev->dev,
168						"TX dTD has remaining data\n");
169					retval = -EPROTO;
170					break;
171				} else
172					break;
173			}
174		} else {
175			dev_info(&udc->dev->dev,
176				"complete_tr error: ep=%d %s: error = 0x%x\n",
177				index >> 1, direction ? "SEND" : "RECV",
178				errors);
179			if (errors & DTD_STATUS_HALTED) {
180				/* Clear the errors and Halt condition */
181				curr_dqh->size_ioc_int_sts &= ~errors;
182				retval = -EPIPE;
183			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
184				retval = -EPROTO;
185			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
186				retval = -EILSEQ;
187			}
188		}
189		if (i != curr_req->dtd_count - 1)
190			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
191	}
192	if (retval)
193		return retval;
194
195	if (direction == EP_DIR_OUT)
196		bit_pos = 1 << curr_req->ep->ep_num;
197	else
198		bit_pos = 1 << (16 + curr_req->ep->ep_num);
199
200	while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
201		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
202			while (readl(&udc->op_regs->epstatus) & bit_pos)
203				udelay(1);
204			break;
205		}
206		udelay(1);
207	}
208
209	curr_req->req.actual = actual;
210
211	return 0;
212}
213
214/*
215 * done() - retire a request; the caller must have blocked irqs
216 * @status : request status to be set; it only takes effect while the
217 * request is still in progress.
218 */
219static void done(struct mv_ep *ep, struct mv_req *req, int status)
220{
221	struct mv_udc *udc = NULL;
222	unsigned char stopped = ep->stopped;
223	struct mv_dtd *curr_td, *next_td;
224	int j;
225
226	udc = (struct mv_udc *)ep->udc;
227	/* Remove the req from ep->queue */
228	list_del_init(&req->queue);
229
230	/* req.status should be set as -EINPROGRESS in ep_queue() */
231	if (req->req.status == -EINPROGRESS)
232		req->req.status = status;
233	else
234		status = req->req.status;
235
236	/* Free dtd for the request */
237	next_td = req->head;
238	for (j = 0; j < req->dtd_count; j++) {
239		curr_td = next_td;
240		if (j != req->dtd_count - 1)
241			next_td = curr_td->next_dtd_virt;
242		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
243	}
244
245	if (req->mapped) {
246		dma_unmap_single(ep->udc->gadget.dev.parent,
247			req->req.dma, req->req.length,
248			((ep_dir(ep) == EP_DIR_IN) ?
249				DMA_TO_DEVICE : DMA_FROM_DEVICE));
250		req->req.dma = DMA_ADDR_INVALID;
251		req->mapped = 0;
252	} else
253		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
254			req->req.dma, req->req.length,
255			((ep_dir(ep) == EP_DIR_IN) ?
256				DMA_TO_DEVICE : DMA_FROM_DEVICE));
257
258	if (status && (status != -ESHUTDOWN))
259		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u\n",
260			ep->ep.name, &req->req, status,
261			req->req.actual, req->req.length);
262
263	ep->stopped = 1;
264
265	spin_unlock(&ep->udc->lock);
266	/*
267	 * complete() is from gadget layer,
268	 * eg fsg->bulk_in_complete()
269	 */
270	if (req->req.complete)
271		req->req.complete(&ep->ep, &req->req);
272
273	spin_lock(&ep->udc->lock);
274	ep->stopped = stopped;
275}
276
277static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
278{
279	u32 tmp, epstatus, bit_pos, direction;
280	struct mv_udc *udc;
281	struct mv_dqh *dqh;
282	unsigned int loops;
283	int readsafe, retval = 0;
284
285	udc = ep->udc;
286	direction = ep_dir(ep);
287	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
288	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
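	/*
	 * In ENDPTPRIME/ENDPTSTATUS/ENDPTCOMPLETE, bits 0-15 refer to RX
	 * (OUT) endpoints and bits 16-31 to TX (IN) endpoints.
	 */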
289
290	/* check if the pipe is empty */
291	if (!(list_empty(&ep->queue))) {
292		struct mv_req *lastreq;
293		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
294		lastreq->tail->dtd_next =
295			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
296		if (readl(&udc->op_regs->epprime) & bit_pos) {
297			loops = LOOPS(PRIME_TIMEOUT);
298			while (readl(&udc->op_regs->epprime) & bit_pos) {
299				if (loops == 0) {
300					retval = -ETIME;
301					goto done;
302				}
303				udelay(LOOPS_USEC);
304				loops--;
305			}
306			if (readl(&udc->op_regs->epstatus) & bit_pos)
307				goto done;
308		}
309		readsafe = 0;
310		loops = LOOPS(READSAFE_TIMEOUT);
311		while (readsafe == 0) {
312			if (loops == 0) {
313				retval = -ETIME;
314				goto done;
315			}
316			/* start with setting the semaphores */
317			tmp = readl(&udc->op_regs->usbcmd);
318			tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
319			writel(tmp, &udc->op_regs->usbcmd);
320
321			/* read the endpoint status */
322			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
323
324			/*
325			 * Reread the ATDTW semaphore bit to check whether it
326			 * has been cleared.  When the hardware sees a hazard,
327			 * it clears the bit; otherwise it stays set to 1 and
328			 * we can proceed with priming the endpoint if it is
329			 * not already primed.
330			 */
331			if (readl(&udc->op_regs->usbcmd)
332				& USBCMD_ATDTW_TRIPWIRE_SET) {
333				readsafe = 1;
334			}
335			loops--;
336			udelay(LOOPS_USEC);
337		}
338
339		/* Clear the semaphore */
340		tmp = readl(&udc->op_regs->usbcmd);
341		tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
342		writel(tmp, &udc->op_regs->usbcmd);
343
344		/* If endpoint is not active, we activate it now. */
345		if (!epstatus) {
346			if (direction == EP_DIR_IN) {
347				struct mv_dtd *curr_dtd = dma_to_virt(
348					&udc->dev->dev, dqh->curr_dtd_ptr);
349
350				loops = LOOPS(DTD_TIMEOUT);
351				while (curr_dtd->size_ioc_sts
352					& DTD_STATUS_ACTIVE) {
353					if (loops == 0) {
354						retval = -ETIME;
355						goto done;
356					}
357					loops--;
358					udelay(LOOPS_USEC);
359				}
360			}
361			/* No other transfers on the queue */
362
363			/* Write dQH next pointer and terminate bit to 0 */
364			dqh->next_dtd_ptr = req->head->td_dma
365				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
366			dqh->size_ioc_int_sts = 0;
367
368			/*
369			 * Ensure that updates to the QH will
370			 * occur before priming.
371			 */
372			wmb();
373
374			/* Prime the Endpoint */
375			writel(bit_pos, &udc->op_regs->epprime);
376		}
377	} else {
378		/* Write dQH next pointer and terminate bit to 0 */
379		dqh->next_dtd_ptr = req->head->td_dma
380			& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
381		dqh->size_ioc_int_sts = 0;
382
383		/* Ensure that updates to the QH will occur before priming. */
384		wmb();
385
386		/* Prime the Endpoint */
387		writel(bit_pos, &udc->op_regs->epprime);
388
389		if (direction == EP_DIR_IN) {
390			/* FIXME add status check after prime the IN ep */
391			int prime_again;
392			u32 curr_dtd_ptr = dqh->curr_dtd_ptr;
393
394			loops = LOOPS(DTD_TIMEOUT);
395			prime_again = 0;
396			while (curr_dtd_ptr != req->head->td_dma) {
397				curr_dtd_ptr = dqh->curr_dtd_ptr;
398				if (loops == 0) {
399					dev_err(&udc->dev->dev,
400						"failed to prime %s\n",
401						ep->name);
402					retval = -ETIME;
403					goto done;
404				}
405				loops--;
406				udelay(LOOPS_USEC);
407
408				if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
409					if (prime_again)
410						goto done;
411					dev_info(&udc->dev->dev,
412						"prime again\n");
413					writel(bit_pos,
414						&udc->op_regs->epprime);
415					prime_again = 1;
416				}
417			}
418		}
419	}
420done:
421	return retval;
422}
423
424static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
425		dma_addr_t *dma, int *is_last)
426{
427	u32 temp;
428	struct mv_dtd *dtd;
429	struct mv_udc *udc;
430
431	/* how big will this transfer be? */
432	*length = min(req->req.length - req->req.actual,
433			(unsigned)EP_MAX_LENGTH_TRANSFER);
434
435	udc = req->ep->udc;
436
437	/*
438	 * Be careful that __GFP_HIGHMEM is not set,
439	 * or we cannot use dma_to_virt
440	 */
441	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
442	if (dtd == NULL)
443		return NULL;
444
445	dtd->td_dma = *dma;
446	/* initialize buffer page pointers */
447	temp = (u32)(req->req.dma + req->req.actual);
448	dtd->buff_ptr0 = cpu_to_le32(temp);
449	temp &= ~0xFFF;
450	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
451	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
452	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
453	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
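	/*
	 * buff_ptr0 holds the exact start address; buff_ptr1..4 point at
	 * the following four 4 KiB page starts, so one dTD can cover the
	 * page containing the start address plus four more pages.
	 */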
454
455	req->req.actual += *length;
456
457	/* zlp is needed if req->req.zero is set */
458	if (req->req.zero) {
459		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
460			*is_last = 1;
461		else
462			*is_last = 0;
463	} else if (req->req.length == req->req.actual)
464		*is_last = 1;
465	else
466		*is_last = 0;
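	/*
	 * Note: with req->req.zero set and a non-zero transfer that is a
	 * multiple of maxpacket, is_last stays 0, so the next build_dtd()
	 * pass emits a zero-length dTD that terminates the transfer with
	 * a ZLP.
	 */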
467
468	/* Fill in the transfer size; set active bit */
469	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
470
471	/* Enable interrupt for the last dtd of a request */
472	if (*is_last && !req->req.no_interrupt)
473		temp |= DTD_IOC;
474
475	dtd->size_ioc_sts = temp;
476
477	mb();
478
479	return dtd;
480}
481
482/* generate dTD linked list for a request */
483static int req_to_dtd(struct mv_req *req)
484{
485	unsigned count;
486	int is_last, is_first = 1;
487	struct mv_dtd *dtd, *last_dtd = NULL;
488	struct mv_udc *udc;
489	dma_addr_t dma;
490
491	udc = req->ep->udc;
492
493	do {
494		dtd = build_dtd(req, &count, &dma, &is_last);
495		if (dtd == NULL)
496			return -ENOMEM;
497
498		if (is_first) {
499			is_first = 0;
500			req->head = dtd;
501		} else {
502			last_dtd->dtd_next = dma;
503			last_dtd->next_dtd_virt = dtd;
504		}
505		last_dtd = dtd;
506		req->dtd_count++;
507	} while (!is_last);
508
509	/* set terminate bit to 1 for the last dTD */
510	dtd->dtd_next = DTD_NEXT_TERMINATE;
511
512	req->tail = dtd;
513
514	return 0;
515}
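/*
 * Example (illustrative only, assuming EP_MAX_LENGTH_TRANSFER is 16 KiB):
 * a 20 KiB request becomes two dTDs of 16 KiB + 4 KiB, linked through
 * dtd_next/next_dtd_virt, with DTD_NEXT_TERMINATE set on the tail.
 */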
516
517static int mv_ep_enable(struct usb_ep *_ep,
518		const struct usb_endpoint_descriptor *desc)
519{
520	struct mv_udc *udc;
521	struct mv_ep *ep;
522	struct mv_dqh *dqh;
523	u16 max = 0;
524	u32 bit_pos, epctrlx, direction;
525	unsigned char zlt = 0, ios = 0, mult = 0;
526	unsigned long flags;
527
528	ep = container_of(_ep, struct mv_ep, ep);
529
530	if (!_ep || !desc || ep->desc
531			|| desc->bDescriptorType != USB_DT_ENDPOINT)
532		return -EINVAL;
533
534	udc = ep->udc;	/* dereference udc only after _ep is validated */
535	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
536		return -ESHUTDOWN;
537
538	direction = ep_dir(ep);
539	max = usb_endpoint_maxp(desc);
540
541	/*
542	 * disable HW zero-length termination select;
543	 * the driver handles zero-length packets through req->req.zero
544	 */
545	zlt = 1;
546
547	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
548
549	/* Check if the Endpoint is Primed */
550	if ((readl(&udc->op_regs->epprime) & bit_pos)
551		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
552		dev_info(&udc->dev->dev,
553			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
554			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
555			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
556			(unsigned)readl(&udc->op_regs->epprime),
557			(unsigned)readl(&udc->op_regs->epstatus),
558			(unsigned)bit_pos);
559		goto en_done;
560	}
561	/* Set the max packet length, interrupt on Setup and Mult fields */
562	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
563	case USB_ENDPOINT_XFER_BULK:
564		zlt = 1;
565		mult = 0;
566		break;
567	case USB_ENDPOINT_XFER_CONTROL:
568		ios = 1;
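		/* fall through: control endpoints also use mult = 0 */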
569	case USB_ENDPOINT_XFER_INT:
570		mult = 0;
571		break;
572	case USB_ENDPOINT_XFER_ISOC:
573		/* Calculate transactions needed for high bandwidth iso */
574		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
575		max = max & 0x7ff;	/* bit 0~10 */
576		/* 3 transactions at most */
577		if (mult > 3)
578			goto en_done;
579		break;
580	default:
581		goto en_done;
582	}
583
584	spin_lock_irqsave(&udc->lock, flags);
585	/* Get the endpoint queue head address */
586	dqh = ep->dqh;
587	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
588		| (mult << EP_QUEUE_HEAD_MULT_POS)
589		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
590		| (ios ? EP_QUEUE_HEAD_IOS : 0);
591	dqh->next_dtd_ptr = 1;
592	dqh->size_ioc_int_sts = 0;
593
594	ep->ep.maxpacket = max;
595	ep->desc = desc;
596	ep->stopped = 0;
597
598	/* Enable the endpoint for Rx or Tx and set the endpoint type */
599	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
600	if (direction == EP_DIR_IN) {
601		epctrlx &= ~EPCTRL_TX_ALL_MASK;
602		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
603			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
604				<< EPCTRL_TX_EP_TYPE_SHIFT);
605	} else {
606		epctrlx &= ~EPCTRL_RX_ALL_MASK;
607		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
608			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
609				<< EPCTRL_RX_EP_TYPE_SHIFT);
610	}
611	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
612
613	/*
614	 * Implement Guideline (GL# USB-7) The unused endpoint type must
615	 * be programmed to bulk.
616	 */
617	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
618	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
619		epctrlx |= (USB_ENDPOINT_XFER_BULK
620				<< EPCTRL_RX_EP_TYPE_SHIFT);
621		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
622	}
623
624	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
625	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
626		epctrlx |= (USB_ENDPOINT_XFER_BULK
627				<< EPCTRL_TX_EP_TYPE_SHIFT);
628		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
629	}
630
631	spin_unlock_irqrestore(&udc->lock, flags);
632
633	return 0;
634en_done:
635	return -EINVAL;
636}
637
638static int mv_ep_disable(struct usb_ep *_ep)
639{
640	struct mv_udc *udc;
641	struct mv_ep *ep;
642	struct mv_dqh *dqh;
643	u32 bit_pos, epctrlx, direction;
644	unsigned long flags;
645
646	ep = container_of(_ep, struct mv_ep, ep);
647	if ((_ep == NULL) || !ep->desc)
648		return -EINVAL;
649
650	udc = ep->udc;
651
652	/* Get the endpoint queue head address */
653	dqh = ep->dqh;
654
655	spin_lock_irqsave(&udc->lock, flags);
656
657	direction = ep_dir(ep);
658	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
659
660	/* Reset the max packet length and the interrupt on Setup */
661	dqh->max_packet_length = 0;
662
663	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
664	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
665	epctrlx &= ~((direction == EP_DIR_IN)
666			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
667			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
668	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
669
670	/* nuke all pending requests (does flush) */
671	nuke(ep, -ESHUTDOWN);
672
673	ep->desc = NULL;
674	ep->stopped = 1;
675
676	spin_unlock_irqrestore(&udc->lock, flags);
677
678	return 0;
679}
680
681static struct usb_request *
682mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
683{
684	struct mv_req *req = NULL;
685
686	req = kzalloc(sizeof *req, gfp_flags);
687	if (!req)
688		return NULL;
689
690	req->req.dma = DMA_ADDR_INVALID;
691	INIT_LIST_HEAD(&req->queue);
692
693	return &req->req;
694}
695
696static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
697{
698	struct mv_req *req = NULL;
699
700	req = container_of(_req, struct mv_req, req);
701
702	if (_req)
703		kfree(req);
704}
705
706static void mv_ep_fifo_flush(struct usb_ep *_ep)
707{
708	struct mv_udc *udc;
709	u32 bit_pos, direction;
710	struct mv_ep *ep;
711	unsigned int loops;
712
713	if (!_ep)
714		return;
715
716	ep = container_of(_ep, struct mv_ep, ep);
717	if (!ep->desc)
718		return;
719
720	udc = ep->udc;
721	direction = ep_dir(ep);
722
723	if (ep->ep_num == 0)
724		bit_pos = (1 << 16) | 1;
725	else if (direction == EP_DIR_OUT)
726		bit_pos = 1 << ep->ep_num;
727	else
728		bit_pos = 1 << (16 + ep->ep_num);
729
730	loops = LOOPS(EPSTATUS_TIMEOUT);
731	do {
732		unsigned int inter_loops;
733
734		if (loops == 0) {
735			dev_err(&udc->dev->dev,
736				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
737				(unsigned)readl(&udc->op_regs->epstatus),
738				(unsigned)bit_pos);
739			return;
740		}
741		/* Write 1 to the Flush register */
742		writel(bit_pos, &udc->op_regs->epflush);
743
744		/* Wait until flushing completed */
745		inter_loops = LOOPS(FLUSH_TIMEOUT);
746		while (readl(&udc->op_regs->epflush)) {
747			/*
748			 * ENDPTFLUSH bit should be cleared to indicate this
749			 * operation is complete
750			 */
751			if (inter_loops == 0) {
752				dev_err(&udc->dev->dev,
753					"TIMEOUT for ENDPTFLUSH=0x%x, "
754					"bit_pos=0x%x\n",
755					(unsigned)readl(&udc->op_regs->epflush),
756					(unsigned)bit_pos);
757				return;
758			}
759			inter_loops--;
760			udelay(LOOPS_USEC);
761		}
762		loops--;
763	} while (readl(&udc->op_regs->epstatus) & bit_pos);
764}
765
766/* queues (submits) an I/O request to an endpoint */
767static int
768mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
769{
770	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
771	struct mv_req *req = container_of(_req, struct mv_req, req);
772	struct mv_udc *udc = ep->udc;
773	unsigned long flags;
774
775	/* catch various bogus parameters */
776	if (!_req || !req->req.complete || !req->req.buf
777			|| !list_empty(&req->queue)) {
778		dev_err(&udc->dev->dev, "%s, bad params", __func__);
779		return -EINVAL;
780	}
781	if (unlikely(!_ep || !ep->desc)) {
782		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
783		return -EINVAL;
784	}
785	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
786		if (req->req.length > ep->ep.maxpacket)
787			return -EMSGSIZE;
788	}
789
791	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
792		return -ESHUTDOWN;
793
794	req->ep = ep;
795
796	/* map virtual address to hardware */
797	if (req->req.dma == DMA_ADDR_INVALID) {
798		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
799					req->req.buf,
800					req->req.length, ep_dir(ep)
801						? DMA_TO_DEVICE
802						: DMA_FROM_DEVICE);
803		req->mapped = 1;
804	} else {
805		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
806					req->req.dma, req->req.length,
807					ep_dir(ep)
808						? DMA_TO_DEVICE
809						: DMA_FROM_DEVICE);
810		req->mapped = 0;
811	}
812
813	req->req.status = -EINPROGRESS;
814	req->req.actual = 0;
815	req->dtd_count = 0;
816
817	spin_lock_irqsave(&udc->lock, flags);
818
819	/* build dtds and push them to device queue */
820	if (!req_to_dtd(req)) {
821		int retval;
822		retval = queue_dtd(ep, req);
823		if (retval) {
824			spin_unlock_irqrestore(&udc->lock, flags);
825			return retval;
826		}
827	} else {
828		spin_unlock_irqrestore(&udc->lock, flags);
829		return -ENOMEM;
830	}
831
832	/* Update ep0 state */
833	if (ep->ep_num == 0)
834		udc->ep0_state = DATA_STATE_XMIT;
835
836	/* irq handler advances the queue */
837	if (req != NULL)
838		list_add_tail(&req->queue, &ep->queue);
839	spin_unlock_irqrestore(&udc->lock, flags);
840
841	return 0;
842}
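/*
 * Gadget-side usage (illustrative sketch, not part of this driver): a
 * function driver reaches mv_ep_queue() through the standard gadget API,
 * where my_complete is a hypothetical completion callback:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() dispatches to mv_ep_ops.queue, i.e. this function.
 */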
843
844/* dequeues (cancels, unlinks) an I/O request from an endpoint */
845static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
846{
847	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
848	struct mv_req *req;
849	struct mv_udc *udc = ep->udc;
850	unsigned long flags;
851	int stopped, ret = 0;
852	u32 epctrlx;
853
854	if (!_ep || !_req)
855		return -EINVAL;
856
857	spin_lock_irqsave(&ep->udc->lock, flags);
858	stopped = ep->stopped;
859
860	/* Stop the ep before we deal with the queue */
861	ep->stopped = 1;
862	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
863	if (ep_dir(ep) == EP_DIR_IN)
864		epctrlx &= ~EPCTRL_TX_ENABLE;
865	else
866		epctrlx &= ~EPCTRL_RX_ENABLE;
867	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
868
869	/* make sure it's actually queued on this endpoint */
870	list_for_each_entry(req, &ep->queue, queue) {
871		if (&req->req == _req)
872			break;
873	}
874	if (&req->req != _req) {
875		ret = -EINVAL;
876		goto out;
877	}
878
879	/* The request is in progress, or completed but not dequeued */
880	if (ep->queue.next == &req->queue) {
881		_req->status = -ECONNRESET;
882		mv_ep_fifo_flush(_ep);	/* flush current transfer */
883
884		/* The request isn't the last request in this ep queue */
885		if (req->queue.next != &ep->queue) {
886			struct mv_dqh *qh;
887			struct mv_req *next_req;
888
889			qh = ep->dqh;
890			next_req = list_entry(req->queue.next, struct mv_req,
891					queue);
892
893			/* Point the QH at the first TD (DMA address) of next request */
894			writel((u32) next_req->head->td_dma, &qh->curr_dtd_ptr);
895		} else {
896			struct mv_dqh *qh;
897
898			qh = ep->dqh;
899			qh->next_dtd_ptr = 1;
900			qh->size_ioc_int_sts = 0;
901		}
902
903	} else {
904		/* The request hasn't been processed; patch up the TD chain */
905		struct mv_req *prev_req;
906
907		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
908		writel(readl(&req->tail->dtd_next),
909				&prev_req->tail->dtd_next);
910
911	}
912
913	done(ep, req, -ECONNRESET);
914
915	/* Enable EP */
916out:
917	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
918	if (ep_dir(ep) == EP_DIR_IN)
919		epctrlx |= EPCTRL_TX_ENABLE;
920	else
921		epctrlx |= EPCTRL_RX_ENABLE;
922	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
923	ep->stopped = stopped;
924
925	spin_unlock_irqrestore(&ep->udc->lock, flags);
926	return ret;
927}
928
929static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
930{
931	u32 epctrlx;
932
933	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
934
935	if (stall) {
936		if (direction == EP_DIR_IN)
937			epctrlx |= EPCTRL_TX_EP_STALL;
938		else
939			epctrlx |= EPCTRL_RX_EP_STALL;
940	} else {
941		if (direction == EP_DIR_IN) {
942			epctrlx &= ~EPCTRL_TX_EP_STALL;
943			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
944		} else {
945			epctrlx &= ~EPCTRL_RX_EP_STALL;
946			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
947		}
948	}
949	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
950}
951
952static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
953{
954	u32 epctrlx;
955
956	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
957
958	if (direction == EP_DIR_OUT)
959		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
960	else
961		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
962}
963
964static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
965{
966	struct mv_ep *ep;
967	unsigned long flags = 0;
968	int status = 0;
969	struct mv_udc *udc;
970
971	ep = container_of(_ep, struct mv_ep, ep);
972	if (!_ep || !ep->desc) {
973		status = -EINVAL;
974		goto out;
975	}
976	udc = ep->udc;	/* dereference udc only after _ep is validated */
977
978	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
979		status = -EOPNOTSUPP;
980		goto out;
981	}
982
983	/*
984	 * An attempt to halt an IN ep will fail if any transfer requests
985	 * are still queued
986	 */
987	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
988		status = -EAGAIN;
989		goto out;
990	}
991
992	spin_lock_irqsave(&ep->udc->lock, flags);
993	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
994	if (halt && wedge)
995		ep->wedge = 1;
996	else if (!halt)
997		ep->wedge = 0;
998	spin_unlock_irqrestore(&ep->udc->lock, flags);
999
1000	if (ep->ep_num == 0) {
1001		udc->ep0_state = WAIT_FOR_SETUP;
1002		udc->ep0_dir = EP_DIR_OUT;
1003	}
1004out:
1005	return status;
1006}
1007
1008static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
1009{
1010	return mv_ep_set_halt_wedge(_ep, halt, 0);
1011}
1012
1013static int mv_ep_set_wedge(struct usb_ep *_ep)
1014{
1015	return mv_ep_set_halt_wedge(_ep, 1, 1);
1016}
1017
1018static struct usb_ep_ops mv_ep_ops = {
1019	.enable		= mv_ep_enable,
1020	.disable	= mv_ep_disable,
1021
1022	.alloc_request	= mv_alloc_request,
1023	.free_request	= mv_free_request,
1024
1025	.queue		= mv_ep_queue,
1026	.dequeue	= mv_ep_dequeue,
1027
1028	.set_wedge	= mv_ep_set_wedge,
1029	.set_halt	= mv_ep_set_halt,
1030	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
1031};
1032
1033static void udc_clock_enable(struct mv_udc *udc)
1034{
1035	unsigned int i;
1036
1037	for (i = 0; i < udc->clknum; i++)
1038		clk_enable(udc->clk[i]);
1039}
1040
1041static void udc_clock_disable(struct mv_udc *udc)
1042{
1043	unsigned int i;
1044
1045	for (i = 0; i < udc->clknum; i++)
1046		clk_disable(udc->clk[i]);
1047}
1048
1049static void udc_stop(struct mv_udc *udc)
1050{
1051	u32 tmp;
1052
1053	/* Disable interrupts */
1054	tmp = readl(&udc->op_regs->usbintr);
1055	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
1056		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
1057	writel(tmp, &udc->op_regs->usbintr);
1058
1059	udc->stopped = 1;
1060
1061	/* Clear the Run/Stop bit in the command register to stop the controller */
1062	tmp = readl(&udc->op_regs->usbcmd);
1063	tmp &= ~USBCMD_RUN_STOP;
1064	writel(tmp, &udc->op_regs->usbcmd);
1065}
1066
1067static void udc_start(struct mv_udc *udc)
1068{
1069	u32 usbintr;
1070
1071	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1072		| USBINTR_PORT_CHANGE_DETECT_EN
1073		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1074	/* Enable interrupts */
1075	writel(usbintr, &udc->op_regs->usbintr);
1076
1077	udc->stopped = 0;
1078
1079	/* Set the Run bit in the command register */
1080	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1081}
1082
1083static int udc_reset(struct mv_udc *udc)
1084{
1085	unsigned int loops;
1086	u32 tmp, portsc;
1087
1088	/* Stop the controller */
1089	tmp = readl(&udc->op_regs->usbcmd);
1090	tmp &= ~USBCMD_RUN_STOP;
1091	writel(tmp, &udc->op_regs->usbcmd);
1092
1093	/* Reset the controller to get default values */
1094	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1095
1096	/* wait for reset to complete */
1097	loops = LOOPS(RESET_TIMEOUT);
1098	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1099		if (loops == 0) {
1100			dev_err(&udc->dev->dev,
1101				"Wait for RESET completed TIMEOUT\n");
1102			return -ETIMEDOUT;
1103		}
1104		loops--;
1105		udelay(LOOPS_USEC);
1106	}
1107
1108	/* set controller to device mode */
1109	tmp = readl(&udc->op_regs->usbmode);
1110	tmp |= USBMODE_CTRL_MODE_DEVICE;
1111
1112	/* turn setup lockout off, require setup tripwire in usbcmd */
1113	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
1114
1115	writel(tmp, &udc->op_regs->usbmode);
1116
1117	writel(0x0, &udc->op_regs->epsetupstat);
1118
1119	/* Configure the Endpoint List Address */
1120	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1121		&udc->op_regs->eplistaddr);
1122
1123	portsc = readl(&udc->op_regs->portsc[0]);
1124	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1125		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);
1126
1127	if (udc->force_fs)
1128		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1129	else
1130		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1131
1132	writel(portsc, &udc->op_regs->portsc[0]);
1133
1134	tmp = readl(&udc->op_regs->epctrlx[0]);
1135	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1136	writel(tmp, &udc->op_regs->epctrlx[0]);
1137
1138	return 0;
1139}
1140
1141static int mv_udc_enable(struct mv_udc *udc)
1142{
1143	int retval;
1144
1145	if (udc->clock_gating == 0 || udc->active)
1146		return 0;
1147
1148	dev_dbg(&udc->dev->dev, "enable udc\n");
1149	udc_clock_enable(udc);
1150	if (udc->pdata->phy_init) {
1151		retval = udc->pdata->phy_init(udc->phy_regs);
1152		if (retval) {
1153			dev_err(&udc->dev->dev,
1154				"init phy error %d\n", retval);
1155			udc_clock_disable(udc);
1156			return retval;
1157		}
1158	}
1159	udc->active = 1;
1160
1161	return 0;
1162}
1163
1164static void mv_udc_disable(struct mv_udc *udc)
1165{
1166	if (udc->clock_gating && udc->active) {
1167		dev_dbg(&udc->dev->dev, "disable udc\n");
1168		if (udc->pdata->phy_deinit)
1169			udc->pdata->phy_deinit(udc->phy_regs);
1170		udc_clock_disable(udc);
1171		udc->active = 0;
1172	}
1173}
1174
1175static int mv_udc_get_frame(struct usb_gadget *gadget)
1176{
1177	struct mv_udc *udc;
1178	u16	retval;
1179
1180	if (!gadget)
1181		return -ENODEV;
1182
1183	udc = container_of(gadget, struct mv_udc, gadget);
1184
1185	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1186
1187	return retval;
1188}
1189
1190/* Tries to wake up the host connected to this gadget */
1191static int mv_udc_wakeup(struct usb_gadget *gadget)
1192{
1193	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1194	u32 portsc;
1195
1196	/* Remote wakeup feature not enabled by host */
1197	if (!udc->remote_wakeup)
1198		return -ENOTSUPP;
1199
1200	portsc = readl(&udc->op_regs->portsc[0]);
1201	/* not suspended? */
1202	if (!(portsc & PORTSCX_PORT_SUSPEND))
1203		return 0;
1204	/* trigger force resume */
1205	portsc |= PORTSCX_PORT_FORCE_RESUME;
1206	writel(portsc, &udc->op_regs->portsc[0]);
1207	return 0;
1208}
1209
1210static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
1211{
1212	struct mv_udc *udc;
1213	unsigned long flags;
1214	int retval = 0;
1215
1216	udc = container_of(gadget, struct mv_udc, gadget);
1217	spin_lock_irqsave(&udc->lock, flags);
1218
1219	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1220		__func__, udc->softconnect, udc->vbus_active);
1221
1222	udc->vbus_active = (is_active != 0);
1223	if (udc->driver && udc->softconnect && udc->vbus_active) {
1224		retval = mv_udc_enable(udc);
1225		if (retval == 0) {
1226			/* Clock is disabled, need re-init registers */
1227			udc_reset(udc);
1228			ep0_reset(udc);
1229			udc_start(udc);
1230		}
1231	} else if (udc->driver && udc->softconnect) {
1232		/* stop all transfers in the queue */
1233		stop_activity(udc, udc->driver);
1234		udc_stop(udc);
1235		mv_udc_disable(udc);
1236	}
1237
1238	spin_unlock_irqrestore(&udc->lock, flags);
1239	return retval;
1240}
1241
1242static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1243{
1244	struct mv_udc *udc;
1245	unsigned long flags;
1246	int retval = 0;
1247
1248	udc = container_of(gadget, struct mv_udc, gadget);
1249	spin_lock_irqsave(&udc->lock, flags);
1250
1251	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1252			__func__, udc->softconnect, udc->vbus_active);
1253
1254	udc->softconnect = (is_on != 0);
1255	if (udc->driver && udc->softconnect && udc->vbus_active) {
1256		retval = mv_udc_enable(udc);
1257		if (retval == 0) {
1258			/* Clock is disabled, need re-init registers */
1259			udc_reset(udc);
1260			ep0_reset(udc);
1261			udc_start(udc);
1262		}
1263	} else if (udc->driver && udc->vbus_active) {
1264		/* stop all transfers in the queue */
1265		stop_activity(udc, udc->driver);
1266		udc_stop(udc);
1267		mv_udc_disable(udc);
1268	}
1269
1270	spin_unlock_irqrestore(&udc->lock, flags);
1271	return retval;
1272}
1273
1274static int mv_udc_start(struct usb_gadget_driver *driver,
1275		int (*bind)(struct usb_gadget *));
1276static int mv_udc_stop(struct usb_gadget_driver *driver);
1277/* device controller usb_gadget_ops structure */
1278static const struct usb_gadget_ops mv_ops = {
1279
1280	/* returns the current frame number */
1281	.get_frame	= mv_udc_get_frame,
1282
1283	/* tries to wake up the host connected to this gadget */
1284	.wakeup		= mv_udc_wakeup,
1285
1286	/* notify controller that VBUS is powered or not */
1287	.vbus_session	= mv_udc_vbus_session,
1288
1289	/* D+ pullup, software-controlled connect/disconnect to USB host */
1290	.pullup		= mv_udc_pullup,
1291	.start		= mv_udc_start,
1292	.stop		= mv_udc_stop,
1293};
1294
1295static int eps_init(struct mv_udc *udc)
1296{
1297	struct mv_ep	*ep;
1298	char name[14];
1299	int i;
1300
1301	/* initialize ep0 */
1302	ep = &udc->eps[0];
1303	ep->udc = udc;
1304	strncpy(ep->name, "ep0", sizeof(ep->name));
1305	ep->ep.name = ep->name;
1306	ep->ep.ops = &mv_ep_ops;
1307	ep->wedge = 0;
1308	ep->stopped = 0;
1309	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1310	ep->ep_num = 0;
1311	ep->desc = &mv_ep0_desc;
1312	INIT_LIST_HEAD(&ep->queue);
1313
1314	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1315
1316	/* initialize other endpoints */
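	/*
	 * udc->eps[] is indexed by ep_num * 2 + direction; entries 0 and 1
	 * belong to ep0 (see ep0_reset()), so start at index 2.
	 */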
1317	for (i = 2; i < udc->max_eps * 2; i++) {
1318		ep = &udc->eps[i];
1319		if (i % 2) {
1320			snprintf(name, sizeof(name), "ep%din", i / 2);
1321			ep->direction = EP_DIR_IN;
1322		} else {
1323			snprintf(name, sizeof(name), "ep%dout", i / 2);
1324			ep->direction = EP_DIR_OUT;
1325		}
1326		ep->udc = udc;
1327		strncpy(ep->name, name, sizeof(ep->name));
1328		ep->ep.name = ep->name;
1329
1330		ep->ep.ops = &mv_ep_ops;
1331		ep->stopped = 0;
1332		ep->ep.maxpacket = (unsigned short) ~0;
1333		ep->ep_num = i / 2;
1334
1335		INIT_LIST_HEAD(&ep->queue);
1336		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1337
1338		ep->dqh = &udc->ep_dqh[i];
1339	}
1340
1341	return 0;
1342}
1343
1344/* delete all endpoint requests, called with spinlock held */
1345static void nuke(struct mv_ep *ep, int status)
1346{
1347	/* called with spinlock held */
1348	ep->stopped = 1;
1349
1350	/* endpoint fifo flush */
1351	mv_ep_fifo_flush(&ep->ep);
1352
1353	while (!list_empty(&ep->queue)) {
1354		struct mv_req *req = NULL;
1355		req = list_entry(ep->queue.next, struct mv_req, queue);
1356		done(ep, req, status);
1357	}
1358}
1359
1360/* stop all USB activities */
1361static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1362{
1363	struct mv_ep	*ep;
1364
1365	nuke(&udc->eps[0], -ESHUTDOWN);
1366
1367	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1368		nuke(ep, -ESHUTDOWN);
1369	}
1370
1371	/* report disconnect; the driver is already quiesced */
1372	if (driver) {
1373		spin_unlock(&udc->lock);
1374		driver->disconnect(&udc->gadget);
1375		spin_lock(&udc->lock);
1376	}
1377}
1378
1379static int mv_udc_start(struct usb_gadget_driver *driver,
1380		int (*bind)(struct usb_gadget *))
1381{
1382	struct mv_udc *udc = the_controller;
1383	int retval = 0;
1384	unsigned long flags;
1385
1386	if (!udc)
1387		return -ENODEV;
1388
1389	if (udc->driver)
1390		return -EBUSY;
1391
1392	spin_lock_irqsave(&udc->lock, flags);
1393
1394	/* hook up the driver ... */
1395	driver->driver.bus = NULL;
1396	udc->driver = driver;
1397	udc->gadget.dev.driver = &driver->driver;
1398
1399	udc->usb_state = USB_STATE_ATTACHED;
1400	udc->ep0_state = WAIT_FOR_SETUP;
1401	udc->ep0_dir = EP_DIR_OUT;
1402
1403	spin_unlock_irqrestore(&udc->lock, flags);
1404
1405	retval = bind(&udc->gadget);
1406	if (retval) {
1407		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
1408				driver->driver.name, retval);
1409		udc->driver = NULL;
1410		udc->gadget.dev.driver = NULL;
1411		return retval;
1412	}
1413
1414	if (udc->transceiver) {
1415		retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
1416		if (retval) {
1417			dev_err(&udc->dev->dev,
1418				"unable to register peripheral to otg\n");
1419			if (driver->unbind) {
1420				driver->unbind(&udc->gadget);
1421				udc->gadget.dev.driver = NULL;
1422				udc->driver = NULL;
1423			}
1424			return retval;
1425		}
1426	}
1427
1428	/* pullup is always on */
1429	mv_udc_pullup(&udc->gadget, 1);
1430
1431	/* When booting with the cable attached, no vbus irq will have occurred */
1432	if (udc->qwork)
1433		queue_work(udc->qwork, &udc->vbus_work);
1434
1435	return 0;
1436}
1437
1438static int mv_udc_stop(struct usb_gadget_driver *driver)
1439{
1440	struct mv_udc *udc = the_controller;
1441	unsigned long flags;
1442
1443	if (!udc)
1444		return -ENODEV;
1445
1446	spin_lock_irqsave(&udc->lock, flags);
1447
1448	mv_udc_enable(udc);
1449	udc_stop(udc);
1450
1451	/* stop all usb activities */
1452	udc->gadget.speed = USB_SPEED_UNKNOWN;
1453	stop_activity(udc, driver);
1454	mv_udc_disable(udc);
1455
1456	spin_unlock_irqrestore(&udc->lock, flags);
1457
1458	/* unbind gadget driver */
1459	driver->unbind(&udc->gadget);
1460	udc->gadget.dev.driver = NULL;
1461	udc->driver = NULL;
1462
1463	return 0;
1464}
1465
1466static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1467{
1468	u32 portsc;
1469
1470	portsc = readl(&udc->op_regs->portsc[0]);
1471	portsc |= mode << 16;
1472	writel(portsc, &udc->op_regs->portsc[0]);
1473}
1474
1475static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1476{
1477	struct mv_udc *udc = the_controller;
1478	struct mv_req *req = container_of(_req, struct mv_req, req);
1479	unsigned long flags;
1480
1481	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1482
1483	spin_lock_irqsave(&udc->lock, flags);
1484	if (req->test_mode) {
1485		mv_set_ptc(udc, req->test_mode);
1486		req->test_mode = 0;
1487	}
1488	spin_unlock_irqrestore(&udc->lock, flags);
1489}
1490
1491static int
1492udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
1493{
1494	int retval = 0;
1495	struct mv_req *req;
1496	struct mv_ep *ep;
1497
1498	ep = &udc->eps[0];
1499	udc->ep0_dir = direction;
1500	udc->ep0_state = WAIT_FOR_OUT_STATUS;
1501
1502	req = udc->status_req;
1503
1504	/* fill in the request structure */
1505	if (!empty) {
1506		*((u16 *) req->req.buf) = cpu_to_le16(status);
1507		req->req.length = 2;
1508	} else
1509		req->req.length = 0;
1510
1511	req->ep = ep;
1512	req->req.status = -EINPROGRESS;
1513	req->req.actual = 0;
1514	if (udc->test_mode) {
1515		req->req.complete = prime_status_complete;
1516		req->test_mode = udc->test_mode;
1517		udc->test_mode = 0;
1518	} else
1519		req->req.complete = NULL;
1520	req->dtd_count = 0;
1521
1522	if (req->req.dma == DMA_ADDR_INVALID) {
1523		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1524				req->req.buf, req->req.length,
1525				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1526		req->mapped = 1;
1527	}
1528
1529	/* prime the data phase */
1530	if (!req_to_dtd(req))
1531		retval = queue_dtd(ep, req);
1532	else {	/* no mem */
1533		retval = -ENOMEM;
1534		goto out;
1535	}
1536
1537	if (retval) {
1538		dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
1539		goto out;
1540	}
1541
1542	list_add_tail(&req->queue, &ep->queue);
1543
1544	return 0;
1545out:
1546	return retval;
1547}
1548
1549static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1550{
1551	if (index <= TEST_FORCE_EN) {
1552		udc->test_mode = index;
1553		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1554			ep0_stall(udc);
1555	} else
1556		dev_err(&udc->dev->dev,
1557			"This test mode (%d) is not supported\n", index);
1558}
1559
1560static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1561{
1562	udc->dev_addr = (u8)setup->wValue;
1563
1564	/* update usb state */
1565	udc->usb_state = USB_STATE_ADDRESS;
1566
1567	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1568		ep0_stall(udc);
1569}
1570
1571static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1572	struct usb_ctrlrequest *setup)
1573{
1574	u16 status = 0;
1575	int retval;
1576
1577	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1578		!= (USB_DIR_IN | USB_TYPE_STANDARD))
1579		return;
1580
1581	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1582		status = 1 << USB_DEVICE_SELF_POWERED;
1583		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1584	} else if ((setup->bRequestType & USB_RECIP_MASK)
1585			== USB_RECIP_INTERFACE) {
1586		/* get interface status */
1587		status = 0;
1588	} else if ((setup->bRequestType & USB_RECIP_MASK)
1589			== USB_RECIP_ENDPOINT) {
1590		u8 ep_num, direction;
1591
1592		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1593		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1594				? EP_DIR_IN : EP_DIR_OUT;
1595		status = ep_is_stall(udc, ep_num, direction)
1596				<< USB_ENDPOINT_HALT;
1597	}
1598
1599	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1600	if (retval)
1601		ep0_stall(udc);
1602	else
1603		udc->ep0_state = DATA_STATE_XMIT;
1604}
1605
1606static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1607{
1608	u8 ep_num;
1609	u8 direction;
1610	struct mv_ep *ep;
1611
1612	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1613		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1614		switch (setup->wValue) {
1615		case USB_DEVICE_REMOTE_WAKEUP:
1616			udc->remote_wakeup = 0;
1617			break;
1618		default:
1619			goto out;
1620		}
1621	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1622		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1623		switch (setup->wValue) {
1624		case USB_ENDPOINT_HALT:
1625			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1626			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1627				? EP_DIR_IN : EP_DIR_OUT;
1628			if (setup->wValue != 0 || setup->wLength != 0
1629				|| ep_num > udc->max_eps)
1630				goto out;
1631			ep = &udc->eps[ep_num * 2 + direction];
1632			if (ep->wedge == 1)
1633				break;
1634			spin_unlock(&udc->lock);
1635			ep_set_stall(udc, ep_num, direction, 0);
1636			spin_lock(&udc->lock);
1637			break;
1638		default:
1639			goto out;
1640		}
1641	} else
1642		goto out;
1643
1644	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1645		ep0_stall(udc);
1646out:
1647	return;
1648}
1649
1650static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1651{
1652	u8 ep_num;
1653	u8 direction;
1654
1655	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1656		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1657		switch (setup->wValue) {
1658		case USB_DEVICE_REMOTE_WAKEUP:
1659			udc->remote_wakeup = 1;
1660			break;
1661		case USB_DEVICE_TEST_MODE:
1662			if (setup->wIndex & 0xFF
1663				||  udc->gadget.speed != USB_SPEED_HIGH)
1664				ep0_stall(udc);
1665
1666			if (udc->usb_state != USB_STATE_CONFIGURED
1667				&& udc->usb_state != USB_STATE_ADDRESS
1668				&& udc->usb_state != USB_STATE_DEFAULT)
1669				ep0_stall(udc);
1670
1671			mv_udc_testmode(udc, (setup->wIndex >> 8));
1672			goto out;
1673		default:
1674			goto out;
1675		}
1676	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1677		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1678		switch (setup->wValue) {
1679		case USB_ENDPOINT_HALT:
1680			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1681			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1682				? EP_DIR_IN : EP_DIR_OUT;
1683			if (setup->wValue != 0 || setup->wLength != 0
1684				|| ep_num > udc->max_eps)
1685				goto out;
1686			spin_unlock(&udc->lock);
1687			ep_set_stall(udc, ep_num, direction, 1);
1688			spin_lock(&udc->lock);
1689			break;
1690		default:
1691			goto out;
1692		}
1693	} else
1694		goto out;
1695
1696	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1697		ep0_stall(udc);
1698out:
1699	return;
1700}
1701
1702static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
1703	struct usb_ctrlrequest *setup)
1704{
1705	bool delegate = false;
1706
1707	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
1708
1709	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1710			setup->bRequestType, setup->bRequest,
1711			setup->wValue, setup->wIndex, setup->wLength);
1712	/* We process some standard setup requests here */
1713	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1714		switch (setup->bRequest) {
1715		case USB_REQ_GET_STATUS:
1716			ch9getstatus(udc, ep_num, setup);
1717			break;
1718
1719		case USB_REQ_SET_ADDRESS:
1720			ch9setaddress(udc, setup);
1721			break;
1722
1723		case USB_REQ_CLEAR_FEATURE:
1724			ch9clearfeature(udc, setup);
1725			break;
1726
1727		case USB_REQ_SET_FEATURE:
1728			ch9setfeature(udc, setup);
1729			break;
1730
1731		default:
1732			delegate = true;
1733		}
1734	} else
1735		delegate = true;
1736
1737	/* delegate USB standard requests to the gadget driver */
1738	if (delegate) {
1739		/* USB requests handled by gadget */
1740		if (setup->wLength) {
1741			/* DATA phase from gadget, STATUS phase from udc */
1742			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1743					?  EP_DIR_IN : EP_DIR_OUT;
1744			spin_unlock(&udc->lock);
1745			if (udc->driver->setup(&udc->gadget,
1746				&udc->local_setup_buff) < 0)
1747				ep0_stall(udc);
1748			spin_lock(&udc->lock);
1749			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
1750					?  DATA_STATE_XMIT : DATA_STATE_RECV;
1751		} else {
1752			/* no DATA phase, IN STATUS phase from gadget */
1753			udc->ep0_dir = EP_DIR_IN;
1754			spin_unlock(&udc->lock);
1755			if (udc->driver->setup(&udc->gadget,
1756				&udc->local_setup_buff) < 0)
1757				ep0_stall(udc);
1758			spin_lock(&udc->lock);
1759			udc->ep0_state = WAIT_FOR_OUT_STATUS;
1760		}
1761	}
1762}
1763
1764/* complete the DATA or STATUS phase of ep0; prime a status phase if needed */
1765static void ep0_req_complete(struct mv_udc *udc,
1766	struct mv_ep *ep0, struct mv_req *req)
1767{
1768	u32 new_addr;
1769
1770	if (udc->usb_state == USB_STATE_ADDRESS) {
1771		/* set the new address */
1772		new_addr = (u32)udc->dev_addr;
1773		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1774			&udc->op_regs->deviceaddr);
1775	}
1776
1777	done(ep0, req, 0);
1778
1779	switch (udc->ep0_state) {
1780	case DATA_STATE_XMIT:
1781		/* receive status phase */
1782		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1783			ep0_stall(udc);
1784		break;
1785	case DATA_STATE_RECV:
1786		/* send status phase */
1787		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1788			ep0_stall(udc);
1789		break;
1790	case WAIT_FOR_OUT_STATUS:
1791		udc->ep0_state = WAIT_FOR_SETUP;
1792		break;
1793	case WAIT_FOR_SETUP:
1794		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
1795		break;
1796	default:
1797		ep0_stall(udc);
1798		break;
1799	}
1800}
1801
1802static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
1803{
1804	u32 temp;
1805	struct mv_dqh *dqh;
1806
1807	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
1808
1809	/* Clear bit in ENDPTSETUPSTAT */
1810	writel((1 << ep_num), &udc->op_regs->epsetupstat);
1811
1812	/* loop while a hazard exists, i.e. a new setup packet arrives mid-copy */
1813	do {
1814		/* Set Setup Tripwire */
1815		temp = readl(&udc->op_regs->usbcmd);
1816		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1817
1818		/* Copy the setup packet to local buffer */
1819		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
1820	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
1821
1822	/* Clear Setup Tripwire */
1823	temp = readl(&udc->op_regs->usbcmd);
1824	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1825}
1826
1827static void irq_process_tr_complete(struct mv_udc *udc)
1828{
1829	u32 tmp, bit_pos;
1830	int i, ep_num = 0, direction = 0;
1831	struct mv_ep	*curr_ep;
1832	struct mv_req *curr_req, *temp_req;
1833	int status;
1834
1835	/*
1836	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
1837	 * because the setup packets are to be read ASAP
1838	 */
1839
1840	/* Process all Setup packet received interrupts */
1841	tmp = readl(&udc->op_regs->epsetupstat);
1842
1843	if (tmp) {
1844		for (i = 0; i < udc->max_eps; i++) {
1845			if (tmp & (1 << i)) {
1846				get_setup_data(udc, i,
1847					(u8 *)(&udc->local_setup_buff));
1848				handle_setup_packet(udc, i,
1849					&udc->local_setup_buff);
1850			}
1851		}
1852	}
1853
1854	/* Don't clear the endpoint setup status register here.
1855	 * It is cleared as a setup packet is read out of the buffer
1856	 */
1857
1858	/* Process non-setup transaction complete interrupts */
1859	tmp = readl(&udc->op_regs->epcomplete);
1860
1861	if (!tmp)
1862		return;
1863
1864	writel(tmp, &udc->op_regs->epcomplete);
1865
1866	for (i = 0; i < udc->max_eps * 2; i++) {
1867		ep_num = i >> 1;
1868		direction = i % 2;
1869
1870		bit_pos = 1 << (ep_num + 16 * direction);
1871
1872		if (!(bit_pos & tmp))
1873			continue;
1874
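		/* i == 1 is ep0 IN; both ep0 directions share udc->eps[0] */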
1875		if (i == 1)
1876			curr_ep = &udc->eps[0];
1877		else
1878			curr_ep = &udc->eps[i];
1879		/* process the req queue until an incomplete request is found */
1880		list_for_each_entry_safe(curr_req, temp_req,
1881			&curr_ep->queue, queue) {
1882			status = process_ep_req(udc, i, curr_req);
1883			if (status)
1884				break;
1885
1886			/* write back status to req */
1887			curr_req->req.status = status;
1888
1889			/* ep0 request completion */
1890			if (ep_num == 0) {
1891				ep0_req_complete(udc, curr_ep, curr_req);
1892				break;
1893			} else {
1894				done(curr_ep, curr_req, status);
1895			}
1896		}
1897	}
1898}
1899
1900void irq_process_reset(struct mv_udc *udc)
1901{
1902	u32 tmp;
1903	unsigned int loops;
1904
1905	udc->ep0_dir = EP_DIR_OUT;
1906	udc->ep0_state = WAIT_FOR_SETUP;
1907	udc->remote_wakeup = 0;		/* default to 0 on reset */
1908
1909	/* The device address occupies bits 25-31; clear it */
1910	tmp = readl(&udc->op_regs->deviceaddr);
1911	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1912	writel(tmp, &udc->op_regs->deviceaddr);
1913
1914	/* Clear all the setup token semaphores */
1915	tmp = readl(&udc->op_regs->epsetupstat);
1916	writel(tmp, &udc->op_regs->epsetupstat);
1917
1918	/* Clear all the endpoint complete status bits */
1919	tmp = readl(&udc->op_regs->epcomplete);
1920	writel(tmp, &udc->op_regs->epcomplete);
1921
1922	/* wait until all endptprime bits cleared */
1923	loops = LOOPS(PRIME_TIMEOUT);
1924	while (readl(&udc->op_regs->epprime)) {
1925		if (loops == 0) {
1926			dev_err(&udc->dev->dev,
1927				"Timeout for ENDPTPRIME = 0x%x\n",
1928				readl(&udc->op_regs->epprime));
1929			break;
1930		}
1931		loops--;
1932		udelay(LOOPS_USEC);
1933	}
1934
1935	/* Write 1s to the Flush register */
1936	writel((u32)~0, &udc->op_regs->epflush);
1937
1938	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1939		dev_info(&udc->dev->dev, "usb bus reset\n");
1940		udc->usb_state = USB_STATE_DEFAULT;
1941		/* reset all the queues, stop all USB activities */
1942		stop_activity(udc, udc->driver);
1943	} else {
1944		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1945			readl(&udc->op_regs->portsc[0]));
1946
1947		/*
1948		 * the controller reset has completed;
1949		 * re-initialize it
1950		 */
1951		udc_reset(udc);
1952
1953		/* reset all the queues, stop all USB activities */
1954		stop_activity(udc, udc->driver);
1955
1956		/* reset ep0 dQH and endptctrl */
1957		ep0_reset(udc);
1958
1959		/* enable interrupt and set controller to run state */
1960		udc_start(udc);
1961
1962		udc->usb_state = USB_STATE_ATTACHED;
1963	}
1964}
1965
1966static void handle_bus_resume(struct mv_udc *udc)
1967{
1968	udc->usb_state = udc->resume_state;
1969	udc->resume_state = 0;
1970
1971	/* report resume to the driver */
1972	if (udc->driver) {
1973		if (udc->driver->resume) {
1974			spin_unlock(&udc->lock);
1975			udc->driver->resume(&udc->gadget);
1976			spin_lock(&udc->lock);
1977		}
1978	}
1979}
1980
1981static void irq_process_suspend(struct mv_udc *udc)
1982{
1983	udc->resume_state = udc->usb_state;
1984	udc->usb_state = USB_STATE_SUSPENDED;
1985
1986	if (udc->driver->suspend) {
1987		spin_unlock(&udc->lock);
1988		udc->driver->suspend(&udc->gadget);
1989		spin_lock(&udc->lock);
1990	}
1991}
1992
1993static void irq_process_port_change(struct mv_udc *udc)
1994{
1995	u32 portsc;
1996
1997	portsc = readl(&udc->op_regs->portsc[0]);
1998	if (!(portsc & PORTSCX_PORT_RESET)) {
1999		/* Get the speed */
2000		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
2001		switch (speed) {
2002		case PORTSCX_PORT_SPEED_HIGH:
2003			udc->gadget.speed = USB_SPEED_HIGH;
2004			break;
2005		case PORTSCX_PORT_SPEED_FULL:
2006			udc->gadget.speed = USB_SPEED_FULL;
2007			break;
2008		case PORTSCX_PORT_SPEED_LOW:
2009			udc->gadget.speed = USB_SPEED_LOW;
2010			break;
2011		default:
2012			udc->gadget.speed = USB_SPEED_UNKNOWN;
2013			break;
2014		}
2015	}
2016
2017	if (portsc & PORTSCX_PORT_SUSPEND) {
2018		udc->resume_state = udc->usb_state;
2019		udc->usb_state = USB_STATE_SUSPENDED;
2020		if (udc->driver->suspend) {
2021			spin_unlock(&udc->lock);
2022			udc->driver->suspend(&udc->gadget);
2023			spin_lock(&udc->lock);
2024		}
2025	}
2026
2027	if (!(portsc & PORTSCX_PORT_SUSPEND)
2028		&& udc->usb_state == USB_STATE_SUSPENDED) {
2029		handle_bus_resume(udc);
2030	}
2031
2032	if (!udc->resume_state)
2033		udc->usb_state = USB_STATE_DEFAULT;
2034}
2035
2036static void irq_process_error(struct mv_udc *udc)
2037{
2038	/* Increment the error count */
2039	udc->errors++;
2040}
2041
2042static irqreturn_t mv_udc_irq(int irq, void *dev)
2043{
2044	struct mv_udc *udc = (struct mv_udc *)dev;
2045	u32 status, intr;
2046
2047	/* ignore interrupts while the controller is stopped */
2048	if (udc->stopped)
2049		return IRQ_NONE;
2050
2051	spin_lock(&udc->lock);
2052
2053	status = readl(&udc->op_regs->usbsts);
2054	intr = readl(&udc->op_regs->usbintr);
2055	status &= intr;
2056
2057	if (status == 0) {
2058		spin_unlock(&udc->lock);
2059		return IRQ_NONE;
2060	}
2061
2062	/* Clear all the interrupts occurred */
2063	writel(status, &udc->op_regs->usbsts);
2064
2065	if (status & USBSTS_ERR)
2066		irq_process_error(udc);
2067
2068	if (status & USBSTS_RESET)
2069		irq_process_reset(udc);
2070
2071	if (status & USBSTS_PORT_CHANGE)
2072		irq_process_port_change(udc);
2073
2074	if (status & USBSTS_INT)
2075		irq_process_tr_complete(udc);
2076
2077	if (status & USBSTS_SUSPEND)
2078		irq_process_suspend(udc);
2079
2080	spin_unlock(&udc->lock);
2081
2082	return IRQ_HANDLED;
2083}
2084
2085static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2086{
2087	struct mv_udc *udc = (struct mv_udc *)dev;
2088
	/*
	 * Polling VBUS and re-initializing the PHY can take too long for
	 * interrupt context, so defer the work to a workqueue.
	 */
2090	if (udc->qwork)
2091		queue_work(udc->qwork, &udc->vbus_work);
2092
2093	return IRQ_HANDLED;
2094}
2095
2096static void mv_udc_vbus_work(struct work_struct *work)
2097{
2098	struct mv_udc *udc;
2099	unsigned int vbus;
2100
2101	udc = container_of(work, struct mv_udc, vbus_work);
2102	if (!udc->pdata->vbus)
2103		return;
2104
2105	vbus = udc->pdata->vbus->poll();
2106	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2107
2108	if (vbus == VBUS_HIGH)
2109		mv_udc_vbus_session(&udc->gadget, 1);
2110	else if (vbus == VBUS_LOW)
2111		mv_udc_vbus_session(&udc->gadget, 0);
2112}
2113
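#if 0
/*
 * Illustrative sketch only, not part of this driver: a board file
 * could supply the pdata->vbus hook polled above by sampling a GPIO.
 * BOARD_VBUS_GPIO and BOARD_VBUS_IRQ are hypothetical names, and
 * <linux/gpio.h> is assumed to be included.
 */
static int board_vbus_poll(void)
{
	return gpio_get_value(BOARD_VBUS_GPIO) ? VBUS_HIGH : VBUS_LOW;
}

static struct mv_usb_addon_irq board_vbus = {
	.irq	= BOARD_VBUS_IRQ,
	.poll	= board_vbus_poll,
};
#endif
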
/*
 * Device release callback: signal mv_udc_remove(), which waits on
 * udc->done before freeing the udc structure.
 */
2115static void gadget_release(struct device *_dev)
2116{
2117	struct mv_udc *udc = the_controller;
2118
2119	complete(udc->done);
2120}
2121
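/*
 * Tear down in roughly the reverse order of probe: unregister the
 * gadget, release IRQs and DMA resources, unmap registers, drop the
 * clocks, then wait for gadget_release() before freeing udc.
 */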
2122static int __devexit mv_udc_remove(struct platform_device *dev)
2123{
2124	struct mv_udc *udc = the_controller;
2125	int clk_i;
2126
2127	usb_del_gadget_udc(&udc->gadget);
2128
2129	if (udc->qwork) {
2130		flush_workqueue(udc->qwork);
2131		destroy_workqueue(udc->qwork);
2132	}
2133
	/*
	 * If a transceiver is in use, the vbus irq was never requested
	 * by this driver, so there is nothing to free here.
	 */
2138	if (udc->pdata && udc->pdata->vbus
2139		&& udc->clock_gating && udc->transceiver == NULL)
		/* dev_id must match what was passed to request_threaded_irq() */
		free_irq(udc->pdata->vbus->irq, udc);
2141
2142	/* free memory allocated in probe */
2143	if (udc->dtd_pool)
2144		dma_pool_destroy(udc->dtd_pool);
2145
2146	if (udc->ep_dqh)
2147		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2148			udc->ep_dqh, udc->ep_dqh_dma);
2149
2150	kfree(udc->eps);
2151
	if (udc->irq)
		free_irq(udc->irq, udc);	/* dev_id must match request_irq() */
2154
2155	mv_udc_disable(udc);
2156
2157	if (udc->cap_regs)
2158		iounmap(udc->cap_regs);
2159	udc->cap_regs = NULL;
2160
2161	if (udc->phy_regs)
2162		iounmap((void *)udc->phy_regs);
2163	udc->phy_regs = 0;
2164
2165	if (udc->status_req) {
2166		kfree(udc->status_req->req.buf);
2167		kfree(udc->status_req);
2168	}
2169
	/* '<' not '<=': udc->clk[] holds exactly udc->clknum entries */
	for (clk_i = 0; clk_i < udc->clknum; clk_i++)
		clk_put(udc->clk[clk_i]);
2172
2173	device_unregister(&udc->gadget.dev);
2174
	/* free dev; wait until release() has finished */
2176	wait_for_completion(udc->done);
2177	kfree(udc);
2178
2179	the_controller = NULL;
2180
2181	return 0;
2182}
2183
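/*
 * Probe: the ordering below matters.  Clocks and the PHY must be up
 * before any controller register is touched, and the controller is
 * stopped first in case a bootloader left it running.
 */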
2184static int __devinit mv_udc_probe(struct platform_device *dev)
2185{
2186	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
2187	struct mv_udc *udc;
2188	int retval = 0;
2189	int clk_i = 0;
2190	struct resource *r;
2191	size_t size;
2192
2193	if (pdata == NULL) {
2194		dev_err(&dev->dev, "missing platform_data\n");
2195		return -ENODEV;
2196	}
2197
2198	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
2199	udc = kzalloc(size, GFP_KERNEL);
2200	if (udc == NULL) {
2201		dev_err(&dev->dev, "failed to allocate memory for udc\n");
2202		return -ENOMEM;
2203	}
2204
2205	the_controller = udc;
2206	udc->done = &release_done;
2207	udc->pdata = dev->dev.platform_data;
2208	spin_lock_init(&udc->lock);
2209
2210	udc->dev = dev;
2211
2212#ifdef CONFIG_USB_OTG_UTILS
2213	if (pdata->mode == MV_USB_MODE_OTG)
2214		udc->transceiver = otg_get_transceiver();
2215#endif
2216
2217	udc->clknum = pdata->clknum;
2218	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
2219		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
2220		if (IS_ERR(udc->clk[clk_i])) {
2221			retval = PTR_ERR(udc->clk[clk_i]);
2222			goto err_put_clk;
2223		}
2224	}
2225
2226	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2227	if (r == NULL) {
2228		dev_err(&dev->dev, "no I/O memory resource defined\n");
2229		retval = -ENODEV;
2230		goto err_put_clk;
2231	}
2232
2233	udc->cap_regs = (struct mv_cap_regs __iomem *)
2234		ioremap(r->start, resource_size(r));
2235	if (udc->cap_regs == NULL) {
2236		dev_err(&dev->dev, "failed to map I/O memory\n");
2237		retval = -EBUSY;
2238		goto err_put_clk;
2239	}
2240
2241	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2242	if (r == NULL) {
2243		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
2244		retval = -ENODEV;
2245		goto err_iounmap_capreg;
2246	}
2247
2248	udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
2249	if (udc->phy_regs == 0) {
2250		dev_err(&dev->dev, "failed to map phy I/O memory\n");
2251		retval = -EBUSY;
2252		goto err_iounmap_capreg;
2253	}
2254
	/* we are about to access controller registers, so enable the clock */
2256	udc_clock_enable(udc);
2257	if (pdata->phy_init) {
2258		retval = pdata->phy_init(udc->phy_regs);
2259		if (retval) {
2260			dev_err(&dev->dev, "phy init error %d\n", retval);
2261			goto err_iounmap_phyreg;
2262		}
2263	}
2264
2265	udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
2266		+ (readl(&udc->cap_regs->caplength_hciversion)
2267			& CAPLENGTH_MASK));
2268	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2269
	/*
	 * Some platforms use USB to download the kernel image and may not
	 * disconnect the USB gadget before booting, so stop the UDC first.
	 */
2274	udc_stop(udc);
2275	writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2276
	/* two dQHs (RX and TX) per endpoint, rounded up to DQH_ALIGNMENT */
	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2279	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
2280					&udc->ep_dqh_dma, GFP_KERNEL);
2281
2282	if (udc->ep_dqh == NULL) {
2283		dev_err(&dev->dev, "allocate dQH memory failed\n");
2284		retval = -ENOMEM;
2285		goto err_disable_clock;
2286	}
2287	udc->ep_dqh_size = size;
2288
2289	/* create dTD dma_pool resource */
2290	udc->dtd_pool = dma_pool_create("mv_dtd",
2291			&dev->dev,
2292			sizeof(struct mv_dtd),
2293			DTD_ALIGNMENT,
2294			DMA_BOUNDARY);
2295
2296	if (!udc->dtd_pool) {
2297		retval = -ENOMEM;
2298		goto err_free_dma;
2299	}
2300
	size = udc->max_eps * sizeof(struct mv_ep) * 2;
2302	udc->eps = kzalloc(size, GFP_KERNEL);
2303	if (udc->eps == NULL) {
2304		dev_err(&dev->dev, "allocate ep memory failed\n");
2305		retval = -ENOMEM;
2306		goto err_destroy_dma;
2307	}
2308
2309	/* initialize ep0 status request structure */
2310	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2311	if (!udc->status_req) {
2312		dev_err(&dev->dev, "allocate status_req memory failed\n");
2313		retval = -ENOMEM;
2314		goto err_free_eps;
2315	}
2316	INIT_LIST_HEAD(&udc->status_req->queue);
2317
	/* allocate a small amount of memory to get a valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	if (!udc->status_req->req.buf) {
		retval = -ENOMEM;
		goto err_free_status_req;
	}
	udc->status_req->req.dma = DMA_ADDR_INVALID;
2321
2322	udc->resume_state = USB_STATE_NOTATTACHED;
2323	udc->usb_state = USB_STATE_POWERED;
2324	udc->ep0_dir = EP_DIR_OUT;
2325	udc->remote_wakeup = 0;
2326
2327	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2328	if (r == NULL) {
2329		dev_err(&dev->dev, "no IRQ resource defined\n");
2330		retval = -ENODEV;
2331		goto err_free_status_req;
2332	}
2333	udc->irq = r->start;
2334	if (request_irq(udc->irq, mv_udc_irq,
2335		IRQF_SHARED, driver_name, udc)) {
2336		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
2337			udc->irq);
2338		retval = -ENODEV;
2339		goto err_free_status_req;
2340	}
2341
2342	/* initialize gadget structure */
2343	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
2344	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
2345	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
2346	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
2347	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */
2348
2349	/* the "gadget" abstracts/virtualizes the controller */
2350	dev_set_name(&udc->gadget.dev, "gadget");
2351	udc->gadget.dev.parent = &dev->dev;
2352	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2353	udc->gadget.dev.release = gadget_release;
2354	udc->gadget.name = driver_name;		/* gadget name */
2355
2356	retval = device_register(&udc->gadget.dev);
2357	if (retval)
2358		goto err_free_irq;
2359
2360	eps_init(udc);
2361
	/* VBUS detect: we can disable/enable the clock on demand. */
2363	if (udc->transceiver)
2364		udc->clock_gating = 1;
2365	else if (pdata->vbus) {
2366		udc->clock_gating = 1;
2367		retval = request_threaded_irq(pdata->vbus->irq, NULL,
2368				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2369		if (retval) {
			dev_info(&dev->dev,
				"cannot request irq for VBUS, "
				"disabling clock gating\n");
2373			udc->clock_gating = 0;
2374		}
2375
2376		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2377		if (!udc->qwork) {
2378			dev_err(&dev->dev, "cannot create workqueue\n");
2379			retval = -ENOMEM;
2380			goto err_unregister;
2381		}
2382
2383		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2384	}
2385
2386	/*
2387	 * When clock gating is supported, we can disable clk and phy.
2388	 * If not, it means that VBUS detection is not supported, we
2389	 * have to enable vbus active all the time to let controller work.
2390	 */
2391	if (udc->clock_gating) {
2392		if (udc->pdata->phy_deinit)
2393			udc->pdata->phy_deinit(udc->phy_regs);
2394		udc_clock_disable(udc);
2395	} else
2396		udc->vbus_active = 1;
2397
2398	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
2399	if (retval)
2400		goto err_unregister;
2401
	dev_info(&dev->dev, "successfully probed UDC device %s clock gating\n",
		udc->clock_gating ? "with" : "without");
2404
2405	return 0;
2406
2407err_unregister:
2408	if (udc->pdata && udc->pdata->vbus
2409		&& udc->clock_gating && udc->transceiver == NULL)
		free_irq(pdata->vbus->irq, udc);
2411	device_unregister(&udc->gadget.dev);
2412err_free_irq:
	free_irq(udc->irq, udc);
2414err_free_status_req:
2415	kfree(udc->status_req->req.buf);
2416	kfree(udc->status_req);
2417err_free_eps:
2418	kfree(udc->eps);
2419err_destroy_dma:
2420	dma_pool_destroy(udc->dtd_pool);
2421err_free_dma:
2422	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2423			udc->ep_dqh, udc->ep_dqh_dma);
2424err_disable_clock:
2425	if (udc->pdata->phy_deinit)
2426		udc->pdata->phy_deinit(udc->phy_regs);
2427	udc_clock_disable(udc);
2428err_iounmap_phyreg:
2429	iounmap((void *)udc->phy_regs);
2430err_iounmap_capreg:
2431	iounmap(udc->cap_regs);
2432err_put_clk:
2433	for (clk_i--; clk_i >= 0; clk_i--)
2434		clk_put(udc->clk[clk_i]);
2435	the_controller = NULL;
2436	kfree(udc);
2437	return retval;
2438}
2439
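#if 0
/*
 * Illustrative sketch only, not part of this driver: mv_udc_probe()
 * expects a "pxa-u2o" platform device carrying "capregs" and "phyregs"
 * memory resources, an IRQ, and mv_usb_platform_data.  The addresses,
 * the IRQ number, and board_u2o_pdata below are all hypothetical.
 */
static struct resource board_u2o_resources[] = {
	{
		.name	= "capregs",
		.start	= 0xd4208100,	/* hypothetical base address */
		.end	= 0xd42081ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "phyregs",
		.start	= 0xd4207000,	/* hypothetical base address */
		.end	= 0xd42070ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 44,		/* hypothetical IRQ number */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_u2o_device = {
	.name		= "pxa-u2o",
	.id		= -1,
	.resource	= board_u2o_resources,
	.num_resources	= ARRAY_SIZE(board_u2o_resources),
	.dev		= {
		.platform_data	= &board_u2o_pdata,
	},
};
#endif
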
2440#ifdef CONFIG_PM
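/*
 * System PM: suspend just stops the controller; resume re-initializes
 * the PHY (when the platform provides a hook), then resets and
 * restarts the controller and ep0 state from scratch.
 */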
2441static int mv_udc_suspend(struct device *_dev)
2442{
2443	struct mv_udc *udc = the_controller;
2444
2445	udc_stop(udc);
2446
2447	return 0;
2448}
2449
2450static int mv_udc_resume(struct device *_dev)
2451{
2452	struct mv_udc *udc = the_controller;
2453	int retval;
2454
2455	if (udc->pdata->phy_init) {
2456		retval = udc->pdata->phy_init(udc->phy_regs);
2457		if (retval) {
2458			dev_err(&udc->dev->dev,
2459				"init phy error %d when resume back\n",
2460				retval);
2461			return retval;
2462		}
2463	}
2464
2465	udc_reset(udc);
2466	ep0_reset(udc);
2467	udc_start(udc);
2468
2469	return 0;
2470}
2471
2472static const struct dev_pm_ops mv_udc_pm_ops = {
2473	.suspend	= mv_udc_suspend,
2474	.resume		= mv_udc_resume,
2475};
2476#endif
2477
2478static void mv_udc_shutdown(struct platform_device *dev)
2479{
2480	struct mv_udc *udc = the_controller;
2481	u32 mode;
2482
	/* reset the controller mode (the low two USBMODE bits) to idle */
2484	mode = readl(&udc->op_regs->usbmode);
2485	mode &= ~3;
2486	writel(mode, &udc->op_regs->usbmode);
2487}
2488
2489static struct platform_driver udc_driver = {
2490	.probe		= mv_udc_probe,
	.remove		= __devexit_p(mv_udc_remove),	/* matches __devexit */
2492	.shutdown	= mv_udc_shutdown,
2493	.driver		= {
2494		.owner	= THIS_MODULE,
2495		.name	= "pxa-u2o",
2496#ifdef CONFIG_PM
2497		.pm	= &mv_udc_pm_ops,
2498#endif
2499	},
2500};
2501MODULE_ALIAS("platform:pxa-u2o");
2502
2503MODULE_DESCRIPTION(DRIVER_DESC);
2504MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2505MODULE_VERSION(DRIVER_VERSION);
2506MODULE_LICENSE("GPL");
2507
2509static int __init init(void)
2510{
2511	return platform_driver_register(&udc_driver);
2512}
2513module_init(init);
2514
2516static void __exit cleanup(void)
2517{
2518	platform_driver_unregister(&udc_driver);
2519}
2520module_exit(cleanup);