mv_udc_core.c revision 366162245e619d59c9d615774ab3aa639deb7725
1/*
2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3 * Author: Chao Xie <chao.xie@marvell.com>
4 *	   Neil Zhang <zhangwm@marvell.com>
5 *
6 * This program is free software; you can redistribute  it and/or modify it
7 * under  the terms of  the GNU General  Public License as published by the
8 * Free Software Foundation;  either version 2 of the  License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/dmapool.h>
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/ioport.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/timer.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/moduleparam.h>
27#include <linux/device.h>
28#include <linux/usb/ch9.h>
29#include <linux/usb/gadget.h>
30#include <linux/usb/otg.h>
31#include <linux/pm.h>
32#include <linux/io.h>
33#include <linux/irq.h>
34#include <linux/platform_device.h>
35#include <linux/clk.h>
36#include <linux/platform_data/mv_usb.h>
37#include <asm/system.h>
38#include <asm/unaligned.h>
39
40#include "mv_udc.h"
41
42#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
43#define DRIVER_VERSION		"8 Nov 2010"
44
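/*
 * ep0's direction follows the current control-transfer phase
 * (udc->ep0_dir); all other endpoints have a fixed direction.
 */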
45#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
46				((ep)->udc->ep0_dir) : ((ep)->direction))
47
48/* timeout value -- usec */
49#define RESET_TIMEOUT		10000
50#define FLUSH_TIMEOUT		10000
51#define EPSTATUS_TIMEOUT	10000
52#define PRIME_TIMEOUT		10000
53#define READSAFE_TIMEOUT	1000
54#define DTD_TIMEOUT		1000
55
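/*
 * Timeouts are busy-waited: a timeout of T microseconds is spent as
 * LOOPS(T) = T >> 4 iterations, each doing udelay(LOOPS_USEC) = 16 us.
 */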
56#define LOOPS_USEC_SHIFT	4
57#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
58#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
59
60static DECLARE_COMPLETION(release_done);
61
62static const char driver_name[] = "mv_udc";
63static const char driver_desc[] = DRIVER_DESC;
64
65/* controller device global variable */
66static struct mv_udc	*the_controller;
67int mv_usb_otgsc;
68
69static void nuke(struct mv_ep *ep, int status);
70
71/* for endpoint 0 operations */
72static const struct usb_endpoint_descriptor mv_ep0_desc = {
73	.bLength =		USB_DT_ENDPOINT_SIZE,
74	.bDescriptorType =	USB_DT_ENDPOINT,
75	.bEndpointAddress =	0,
76	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
77	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
78};
79
80static void ep0_reset(struct mv_udc *udc)
81{
82	struct mv_ep *ep;
83	u32 epctrlx;
84	int i = 0;
85
86	/* ep0 in and out */
87	for (i = 0; i < 2; i++) {
88		ep = &udc->eps[i];
89		ep->udc = udc;
90
91		/* ep0 dQH */
92		ep->dqh = &udc->ep_dqh[i];
93
94		/* configure ep0 endpoint capabilities in dQH */
95		ep->dqh->max_packet_length =
96			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
97			| EP_QUEUE_HEAD_IOS;
98
99		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
100
101		epctrlx = readl(&udc->op_regs->epctrlx[0]);
102		if (i) {	/* TX */
103			epctrlx |= EPCTRL_TX_ENABLE
104				| (USB_ENDPOINT_XFER_CONTROL
105					<< EPCTRL_TX_EP_TYPE_SHIFT);
106
107		} else {	/* RX */
108			epctrlx |= EPCTRL_RX_ENABLE
109				| (USB_ENDPOINT_XFER_CONTROL
110					<< EPCTRL_RX_EP_TYPE_SHIFT);
111		}
112
113		writel(epctrlx, &udc->op_regs->epctrlx[0]);
114	}
115}
116
117/* protocol ep0 stall, will automatically be cleared on new transaction */
118static void ep0_stall(struct mv_udc *udc)
119{
120	u32	epctrlx;
121
122	/* set TX and RX to stall */
123	epctrlx = readl(&udc->op_regs->epctrlx[0]);
124	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
125	writel(epctrlx, &udc->op_regs->epctrlx[0]);
126
127	/* update ep0 state */
128	udc->ep0_state = WAIT_FOR_SETUP;
129	udc->ep0_dir = EP_DIR_OUT;
130}
131
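/*
 * Walk a request's dTD list and collect its completion status.
 * Returns 1 if a dTD is still active (request not complete yet),
 * a negative errno on a transfer error, or 0 on success with
 * req.actual updated.
 */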
132static int process_ep_req(struct mv_udc *udc, int index,
133	struct mv_req *curr_req)
134{
135	struct mv_dtd	*curr_dtd;
136	struct mv_dqh	*curr_dqh;
137	int td_complete, actual, remaining_length;
138	int i, direction;
139	int retval = 0;
140	u32 errors;
141
142	curr_dqh = &udc->ep_dqh[index];
143	direction = index % 2;
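	/* dQHs are laid out in pairs: ep0-OUT, ep0-IN, ep1-OUT, ep1-IN, ... */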
144
145	curr_dtd = curr_req->head;
146	td_complete = 0;
147	actual = curr_req->req.length;
148
149	for (i = 0; i < curr_req->dtd_count; i++) {
150		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
151			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
152				udc->eps[index].name);
153			return 1;
154		}
155
156		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
157		if (!errors) {
158			remaining_length =
159				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
160					>> DTD_LENGTH_BIT_POS;
161			actual -= remaining_length;
162		} else {
163			dev_info(&udc->dev->dev,
164				"complete_tr error: ep=%d %s: error = 0x%x\n",
165				index >> 1, direction ? "SEND" : "RECV",
166				errors);
167			if (errors & DTD_STATUS_HALTED) {
168				/* Clear the errors and Halt condition */
169				curr_dqh->size_ioc_int_sts &= ~errors;
170				retval = -EPIPE;
171			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
172				retval = -EPROTO;
173			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
174				retval = -EILSEQ;
175			}
176		}
177		if (i != curr_req->dtd_count - 1)
178			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
179	}
180	if (retval)
181		return retval;
182
183	curr_req->req.actual = actual;
184
185	return 0;
186}
187
188/*
189 * done() - retire a request; caller must hold the udc lock with irqs blocked
190 * @status : request status to be set; only takes effect while the
191 * request is still in progress.
192 */
193static void done(struct mv_ep *ep, struct mv_req *req, int status)
194{
195	struct mv_udc *udc = NULL;
196	unsigned char stopped = ep->stopped;
197	struct mv_dtd *curr_td, *next_td;
198	int j;
199
200	udc = ep->udc;
201	/* Remove the req from the endpoint queue */
202	list_del_init(&req->queue);
203
204	/* req.status should be set as -EINPROGRESS in ep_queue() */
205	if (req->req.status == -EINPROGRESS)
206		req->req.status = status;
207	else
208		status = req->req.status;
209
210	/* Free dtd for the request */
211	next_td = req->head;
212	for (j = 0; j < req->dtd_count; j++) {
213		curr_td = next_td;
214		if (j != req->dtd_count - 1)
215			next_td = curr_td->next_dtd_virt;
216		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
217	}
218
219	if (req->mapped) {
220		dma_unmap_single(ep->udc->gadget.dev.parent,
221			req->req.dma, req->req.length,
222			((ep_dir(ep) == EP_DIR_IN) ?
223				DMA_TO_DEVICE : DMA_FROM_DEVICE));
224		req->req.dma = DMA_ADDR_INVALID;
225		req->mapped = 0;
226	} else
227		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
228			req->req.dma, req->req.length,
229			((ep_dir(ep) == EP_DIR_IN) ?
230				DMA_TO_DEVICE : DMA_FROM_DEVICE));
231
232	if (status && (status != -ESHUTDOWN))
233		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
234			ep->ep.name, &req->req, status,
235			req->req.actual, req->req.length);
236
237	ep->stopped = 1;
238
239	spin_unlock(&ep->udc->lock);
240	/*
241	 * complete() is from gadget layer,
242	 * eg fsg->bulk_in_complete()
243	 */
244	if (req->req.complete)
245		req->req.complete(&ep->ep, &req->req);
246
247	spin_lock(&ep->udc->lock);
248	ep->stopped = stopped;
249}
250
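/*
 * Hand a request's dTD chain to the hardware. If the endpoint queue is
 * not empty, the chain is linked after the last queued dTD and the
 * ATDTW tripwire protocol guards against racing the controller; if the
 * queue is empty, the dQH is pointed at the chain and the endpoint is
 * primed directly. Called with the udc lock held.
 */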
251static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
252{
253	u32 tmp, epstatus, bit_pos, direction;
254	struct mv_udc *udc;
255	struct mv_dqh *dqh;
256	unsigned int loops;
257	int readsafe, retval = 0;
258
259	udc = ep->udc;
260	direction = ep_dir(ep);
261	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
262	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
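	/* in the ENDPT* registers, OUT endpoints use bits 0-15 and IN use 16-31 */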
263
264	/* check if the pipe is empty */
265	if (!(list_empty(&ep->queue))) {
266		struct mv_req *lastreq;
267		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
268		lastreq->tail->dtd_next =
269			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
270		if (readl(&udc->op_regs->epprime) & bit_pos) {
271			loops = LOOPS(PRIME_TIMEOUT);
272			while (readl(&udc->op_regs->epprime) & bit_pos) {
273				if (loops == 0) {
274					retval = -ETIME;
275					goto done;
276				}
277				udelay(LOOPS_USEC);
278				loops--;
279			}
280			if (readl(&udc->op_regs->epstatus) & bit_pos)
281				goto done;
282		}
283		readsafe = 0;
284		loops = LOOPS(READSAFE_TIMEOUT);
285		while (readsafe == 0) {
286			if (loops == 0) {
287				retval = -ETIME;
288				goto done;
289			}
290			/* start with setting the semaphores */
291			tmp = readl(&udc->op_regs->usbcmd);
292			tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
293			writel(tmp, &udc->op_regs->usbcmd);
294
295			/* read the endpoint status */
296			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
297
298			/*
299			 * Reread the ATDTW semaphore bit to check if it has
300			 * been cleared. When the hardware sees a hazard it
301			 * clears the bit; otherwise the bit stays set and we
302			 * can proceed with priming the endpoint if it is not
303			 * already primed.
304			 */
305			if (readl(&udc->op_regs->usbcmd)
306				& USBCMD_ATDTW_TRIPWIRE_SET) {
307				readsafe = 1;
308			}
309			loops--;
310			udelay(LOOPS_USEC);
311		}
312
313		/* Clear the semaphore */
314		tmp = readl(&udc->op_regs->usbcmd);
315		tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
316		writel(tmp, &udc->op_regs->usbcmd);
317
318		/* If endpoint is not active, we activate it now. */
319		if (!epstatus) {
320			if (direction == EP_DIR_IN) {
321				struct mv_dtd *curr_dtd = dma_to_virt(
322					&udc->dev->dev, dqh->curr_dtd_ptr);
323
324				loops = LOOPS(DTD_TIMEOUT);
325				while (curr_dtd->size_ioc_sts
326					& DTD_STATUS_ACTIVE) {
327					if (loops == 0) {
328						retval = -ETIME;
329						goto done;
330					}
331					loops--;
332					udelay(LOOPS_USEC);
333				}
334			}
335			/* No other transfers on the queue */
336
337			/* Write dQH next pointer and terminate bit to 0 */
338			dqh->next_dtd_ptr = req->head->td_dma
339				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
340			dqh->size_ioc_int_sts = 0;
341
342			/*
343			 * Ensure that updates to the QH will
344			 * occur before priming.
345			 */
346			wmb();
347
348			/* Prime the Endpoint */
349			writel(bit_pos, &udc->op_regs->epprime);
350		}
351	} else {
352		/* Write dQH next pointer and terminate bit to 0 */
353		dqh->next_dtd_ptr = req->head->td_dma
354			& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
355		dqh->size_ioc_int_sts = 0;
356
357		/* Ensure that updates to the QH will occur before priming. */
358		wmb();
359
360		/* Prime the Endpoint */
361		writel(bit_pos, &udc->op_regs->epprime);
362
363		if (direction == EP_DIR_IN) {
364			/* FIXME add status check after prime the IN ep */
365			int prime_again;
366			u32 curr_dtd_ptr = dqh->curr_dtd_ptr;
367
368			loops = LOOPS(DTD_TIMEOUT);
369			prime_again = 0;
370			while ((curr_dtd_ptr != req->head->td_dma)) {
371				curr_dtd_ptr = dqh->curr_dtd_ptr;
372				if (loops == 0) {
373					dev_err(&udc->dev->dev,
374						"failed to prime %s\n",
375						ep->name);
376					retval = -ETIME;
377					goto done;
378				}
379				loops--;
380				udelay(LOOPS_USEC);
381
382				if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
383					if (prime_again)
384						goto done;
385					dev_info(&udc->dev->dev,
386						"prime again\n");
387					writel(bit_pos,
388						&udc->op_regs->epprime);
389					prime_again = 1;
390				}
391			}
392		}
393	}
394done:
395	return retval;
396}
397
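/*
 * Build a single dTD covering up to EP_MAX_LENGTH_TRANSFER bytes of the
 * request. The five buffer page pointers let one dTD span five 4 KB
 * pages of the DMA-mapped buffer.
 */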
398static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
399		dma_addr_t *dma, int *is_last)
400{
401	u32 temp;
402	struct mv_dtd *dtd;
403	struct mv_udc *udc;
404
405	/* how big will this transfer be? */
406	*length = min(req->req.length - req->req.actual,
407			(unsigned)EP_MAX_LENGTH_TRANSFER);
408
409	udc = req->ep->udc;
410
411	/*
412	 * Be careful that no __GFP_HIGHMEM is set,
413	 * or we cannot use dma_to_virt
414	 */
415	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
416	if (dtd == NULL)
417		return dtd;
418
419	dtd->td_dma = *dma;
420	/* initialize buffer page pointers */
421	temp = (u32)(req->req.dma + req->req.actual);
422	dtd->buff_ptr0 = cpu_to_le32(temp);
423	temp &= ~0xFFF;
424	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
425	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
426	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
427	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
428
429	req->req.actual += *length;
430
431	/* zlp is needed if req->req.zero is set */
432	if (req->req.zero) {
433		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
434			*is_last = 1;
435		else
436			*is_last = 0;
437	} else if (req->req.length == req->req.actual)
438		*is_last = 1;
439	else
440		*is_last = 0;
441
442	/* Fill in the transfer size; set active bit */
443	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
444
445	/* Enable interrupt for the last dtd of a request */
446	if (*is_last && !req->req.no_interrupt)
447		temp |= DTD_IOC;
448
449	dtd->size_ioc_sts = temp;
450
451	mb();
452
453	return dtd;
454}
455
456/* generate dTD linked list for a request */
457static int req_to_dtd(struct mv_req *req)
458{
459	unsigned count;
460	int is_last, is_first = 1;
461	struct mv_dtd *dtd, *last_dtd = NULL;
462	struct mv_udc *udc;
463	dma_addr_t dma;
464
465	udc = req->ep->udc;
466
467	do {
468		dtd = build_dtd(req, &count, &dma, &is_last);
469		if (dtd == NULL)
470			return -ENOMEM;
471
472		if (is_first) {
473			is_first = 0;
474			req->head = dtd;
475		} else {
476			last_dtd->dtd_next = dma;
477			last_dtd->next_dtd_virt = dtd;
478		}
479		last_dtd = dtd;
480		req->dtd_count++;
481	} while (!is_last);
482
483	/* set terminate bit to 1 for the last dTD */
484	dtd->dtd_next = DTD_NEXT_TERMINATE;
485
486	req->tail = dtd;
487
488	return 0;
489}
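/*
 * The resulting chain for an N-dTD request:
 *
 *	req->head -> dTD0 -> dTD1 -> ... -> dTD(N-1) == req->tail
 *
 * dtd_next holds the DMA address of the next dTD; the last dTD's
 * dtd_next is DTD_NEXT_TERMINATE.
 */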
490
491static int mv_ep_enable(struct usb_ep *_ep,
492		const struct usb_endpoint_descriptor *desc)
493{
494	struct mv_udc *udc;
495	struct mv_ep *ep;
496	struct mv_dqh *dqh;
497	u16 max = 0;
498	u32 bit_pos, epctrlx, direction;
499	unsigned char zlt = 0, ios = 0, mult = 0;
500	unsigned long flags;
501
502	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
503		return -EINVAL;
504
505	ep = container_of(_ep, struct mv_ep, ep);
506	udc = ep->udc;
507	if (ep->desc)
508		return -EINVAL;
509	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
510		return -ESHUTDOWN;
511
512	direction = ep_dir(ep);
513	max = usb_endpoint_maxp(desc);
514
515	/*
516	 * disable HW zero length termination select
517	 * driver handles zero length packet through req->req.zero
518	 */
519	zlt = 1;
520
521	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
522
523	/* Check if the Endpoint is Primed */
524	if ((readl(&udc->op_regs->epprime) & bit_pos)
525		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
526		dev_info(&udc->dev->dev,
527			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
528			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
529			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
530			(unsigned)readl(&udc->op_regs->epprime),
531			(unsigned)readl(&udc->op_regs->epstatus),
532			(unsigned)bit_pos);
533		goto en_done;
534	}
535	/* Set the max packet length, interrupt on Setup and Mult fields */
536	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
537	case USB_ENDPOINT_XFER_BULK:
538		zlt = 1;
539		mult = 0;
540		break;
541	case USB_ENDPOINT_XFER_CONTROL:
542		ios = 1;	/* fall through */
543	case USB_ENDPOINT_XFER_INT:
544		mult = 0;
545		break;
546	case USB_ENDPOINT_XFER_ISOC:
547		/* Calculate transactions needed for high bandwidth iso */
548		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
549		max = max & 0x7ff;	/* bit 0~10 */
550		/* 3 transactions at most */
551		if (mult > 3)
552			goto en_done;
553		break;
554	default:
555		goto en_done;
556	}
557
558	spin_lock_irqsave(&udc->lock, flags);
559	/* Get the endpoint queue head address */
560	dqh = ep->dqh;
561	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
562		| (mult << EP_QUEUE_HEAD_MULT_POS)
563		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
564		| (ios ? EP_QUEUE_HEAD_IOS : 0);
565	dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
566	dqh->size_ioc_int_sts = 0;
567
568	ep->ep.maxpacket = max;
569	ep->desc = desc;
570	ep->stopped = 0;
571
572	/* Enable the endpoint for Rx or Tx and set the endpoint type */
573	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
574	if (direction == EP_DIR_IN) {
575		epctrlx &= ~EPCTRL_TX_ALL_MASK;
576		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
577			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
578				<< EPCTRL_TX_EP_TYPE_SHIFT);
579	} else {
580		epctrlx &= ~EPCTRL_RX_ALL_MASK;
581		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
582			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
583				<< EPCTRL_RX_EP_TYPE_SHIFT);
584	}
585	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
586
587	/*
588	 * Implement Guideline (GL# USB-7) The unused endpoint type must
589	 * be programmed to bulk.
590	 */
591	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
592	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
593		epctrlx |= (USB_ENDPOINT_XFER_BULK
594				<< EPCTRL_RX_EP_TYPE_SHIFT);
595		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
596	}
597
598	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
599	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
600		epctrlx |= (USB_ENDPOINT_XFER_BULK
601				<< EPCTRL_TX_EP_TYPE_SHIFT);
602		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
603	}
604
605	spin_unlock_irqrestore(&udc->lock, flags);
606
607	return 0;
608en_done:
609	return -EINVAL;
610}
611
612static int  mv_ep_disable(struct usb_ep *_ep)
613{
614	struct mv_udc *udc;
615	struct mv_ep *ep;
616	struct mv_dqh *dqh;
617	u32 bit_pos, epctrlx, direction;
618	unsigned long flags;
619
620	ep = container_of(_ep, struct mv_ep, ep);
621	if ((_ep == NULL) || !ep->desc)
622		return -EINVAL;
623
624	udc = ep->udc;
625
626	/* Get the endpoint queue head address */
627	dqh = ep->dqh;
628
629	spin_lock_irqsave(&udc->lock, flags);
630
631	direction = ep_dir(ep);
632	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
633
634	/* Reset the max packet length and the interrupt on Setup */
635	dqh->max_packet_length = 0;
636
637	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
638	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
639	epctrlx &= ~((direction == EP_DIR_IN)
640			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
641			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
642	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
643
644	/* nuke all pending requests (does flush) */
645	nuke(ep, -ESHUTDOWN);
646
647	ep->desc = NULL;
648	ep->stopped = 1;
649
650	spin_unlock_irqrestore(&udc->lock, flags);
651
652	return 0;
653}
654
655static struct usb_request *
656mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
657{
658	struct mv_req *req = NULL;
659
660	req = kzalloc(sizeof *req, gfp_flags);
661	if (!req)
662		return NULL;
663
664	req->req.dma = DMA_ADDR_INVALID;
665	INIT_LIST_HEAD(&req->queue);
666
667	return &req->req;
668}
669
670static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
671{
672	struct mv_req *req = NULL;
673
674	req = container_of(_req, struct mv_req, req);
675
676	if (_req)
677		kfree(req);
678}
679
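/*
 * Flush (retire) any primed dTDs on an endpoint. A flush can race with
 * a new prime, so the outer loop keeps flushing until the endpoint
 * status bit reads back clear.
 */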
680static void mv_ep_fifo_flush(struct usb_ep *_ep)
681{
682	struct mv_udc *udc;
683	u32 bit_pos, direction;
684	struct mv_ep *ep;
685	unsigned int loops;
686
687	if (!_ep)
688		return;
689
690	ep = container_of(_ep, struct mv_ep, ep);
691	if (!ep->desc)
692		return;
693
694	udc = ep->udc;
695	direction = ep_dir(ep);
696
697	if (ep->ep_num == 0)
698		bit_pos = (1 << 16) | 1;
699	else if (direction == EP_DIR_OUT)
700		bit_pos = 1 << ep->ep_num;
701	else
702		bit_pos = 1 << (16 + ep->ep_num);
703
704	loops = LOOPS(EPSTATUS_TIMEOUT);
705	do {
706		unsigned int inter_loops;
707
708		if (loops == 0) {
709			dev_err(&udc->dev->dev,
710				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
711				(unsigned)readl(&udc->op_regs->epstatus),
712				(unsigned)bit_pos);
713			return;
714		}
715		/* Write 1 to the Flush register */
716		writel(bit_pos, &udc->op_regs->epflush);
717
718		/* Wait until flushing completed */
719		inter_loops = LOOPS(FLUSH_TIMEOUT);
720		while (readl(&udc->op_regs->epflush)) {
721			/*
722			 * ENDPTFLUSH bit should be cleared to indicate this
723			 * operation is complete
724			 */
725			if (inter_loops == 0) {
726				dev_err(&udc->dev->dev,
727					"TIMEOUT for ENDPTFLUSH=0x%x,"
728					"bit_pos=0x%x\n",
729					(unsigned)readl(&udc->op_regs->epflush),
730					(unsigned)bit_pos);
731				return;
732			}
733			inter_loops--;
734			udelay(LOOPS_USEC);
735		}
736		loops--;
737	} while (readl(&udc->op_regs->epstatus) & bit_pos);
738}
739
740/* queues (submits) an I/O request to an endpoint */
741static int
742mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
743{
744	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
745	struct mv_req *req = container_of(_req, struct mv_req, req);
746	struct mv_udc *udc = ep->udc;
747	unsigned long flags;
748
749	/* catch various bogus parameters */
750	if (!_req || !req->req.complete || !req->req.buf
751			|| !list_empty(&req->queue)) {
752		dev_err(&udc->dev->dev, "%s, bad params", __func__);
753		return -EINVAL;
754	}
755	if (unlikely(!_ep || !ep->desc)) {
756		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
757		return -EINVAL;
758	}
759	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
760		if (req->req.length > ep->ep.maxpacket)
761			return -EMSGSIZE;
762	}
763
764	udc = ep->udc;
765	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
766		return -ESHUTDOWN;
767
768	req->ep = ep;
769
770	/* map virtual address to hardware */
771	if (req->req.dma == DMA_ADDR_INVALID) {
772		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
773					req->req.buf,
774					req->req.length, ep_dir(ep)
775						? DMA_TO_DEVICE
776						: DMA_FROM_DEVICE);
777		req->mapped = 1;
778	} else {
779		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
780					req->req.dma, req->req.length,
781					ep_dir(ep)
782						? DMA_TO_DEVICE
783						: DMA_FROM_DEVICE);
784		req->mapped = 0;
785	}
786
787	req->req.status = -EINPROGRESS;
788	req->req.actual = 0;
789	req->dtd_count = 0;
790
791	spin_lock_irqsave(&udc->lock, flags);
792
793	/* build dtds and push them to device queue */
794	if (!req_to_dtd(req)) {
795		int retval;
796		retval = queue_dtd(ep, req);
797		if (retval) {
798			spin_unlock_irqrestore(&udc->lock, flags);
799			return retval;
800		}
801	} else {
802		spin_unlock_irqrestore(&udc->lock, flags);
803		return -ENOMEM;
804	}
805
806	/* Update ep0 state */
807	if (ep->ep_num == 0)
808		udc->ep0_state = DATA_STATE_XMIT;
809
810	/* irq handler advances the queue */
811	if (req != NULL)
812		list_add_tail(&req->queue, &ep->queue);
813	spin_unlock_irqrestore(&udc->lock, flags);
814
815	return 0;
816}
817
818/* dequeues (cancels, unlinks) an I/O request from an endpoint */
819static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
820{
821	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
822	struct mv_req *req;
823	struct mv_udc *udc = ep->udc;
824	unsigned long flags;
825	int stopped, ret = 0;
826	u32 epctrlx;
827
828	if (!_ep || !_req)
829		return -EINVAL;
830
831	spin_lock_irqsave(&ep->udc->lock, flags);
832	stopped = ep->stopped;
833
834	/* Stop the ep before we deal with the queue */
835	ep->stopped = 1;
836	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
837	if (ep_dir(ep) == EP_DIR_IN)
838		epctrlx &= ~EPCTRL_TX_ENABLE;
839	else
840		epctrlx &= ~EPCTRL_RX_ENABLE;
841	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
842
843	/* make sure it's actually queued on this endpoint */
844	list_for_each_entry(req, &ep->queue, queue) {
845		if (&req->req == _req)
846			break;
847	}
848	if (&req->req != _req) {
849		ret = -EINVAL;
850		goto out;
851	}
852
853	/* The request is in progress, or completed but not dequeued */
854	if (ep->queue.next == &req->queue) {
855		_req->status = -ECONNRESET;
856		mv_ep_fifo_flush(_ep);	/* flush current transfer */
857
858		/* The request isn't the last request in this ep queue */
859		if (req->queue.next != &ep->queue) {
860			struct mv_dqh *qh;
861			struct mv_req *next_req;
862
863			qh = ep->dqh;
864			next_req = list_entry(req->queue.next, struct mv_req,
865					queue);
866
867			/* Point the QH to the first TD of next request */
868			writel(next_req->head->td_dma, &qh->curr_dtd_ptr);
869		} else {
870			struct mv_dqh *qh;
871
872			qh = ep->dqh;
873			qh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
874			qh->size_ioc_int_sts = 0;
875		}
876
877		/* The request hasn't been processed, patch up the TD chain */
878	} else {
879		struct mv_req *prev_req;
880
881		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
882		writel(readl(&req->tail->dtd_next),
883				&prev_req->tail->dtd_next);
884
885	}
886
887	done(ep, req, -ECONNRESET);
888
889	/* Enable EP */
890out:
891	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
892	if (ep_dir(ep) == EP_DIR_IN)
893		epctrlx |= EPCTRL_TX_ENABLE;
894	else
895		epctrlx |= EPCTRL_RX_ENABLE;
896	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
897	ep->stopped = stopped;
898
899	spin_unlock_irqrestore(&ep->udc->lock, flags);
900	return ret;
901}
902
903static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
904{
905	u32 epctrlx;
906
907	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
908
909	if (stall) {
910		if (direction == EP_DIR_IN)
911			epctrlx |= EPCTRL_TX_EP_STALL;
912		else
913			epctrlx |= EPCTRL_RX_EP_STALL;
914	} else {
915		if (direction == EP_DIR_IN) {
916			epctrlx &= ~EPCTRL_TX_EP_STALL;
917			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
918		} else {
919			epctrlx &= ~EPCTRL_RX_EP_STALL;
920			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
921		}
922	}
923	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
924}
925
926static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
927{
928	u32 epctrlx;
929
930	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
931
932	if (direction == EP_DIR_OUT)
933		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
934	else
935		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
936}
937
938static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
939{
940	struct mv_ep *ep;
941	unsigned long flags = 0;
942	int status = 0;
943	struct mv_udc *udc;
944
945	ep = container_of(_ep, struct mv_ep, ep);
946	udc = ep->udc;
947	if (!_ep || !ep->desc) {
948		status = -EINVAL;
949		goto out;
950	}
951
952	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
953		status = -EOPNOTSUPP;
954		goto out;
955	}
956
957	/*
958	 * An attempt to halt an IN ep will fail if any transfer
959	 * requests are still queued
960	 */
961	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
962		status = -EAGAIN;
963		goto out;
964	}
965
966	spin_lock_irqsave(&ep->udc->lock, flags);
967	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
968	if (halt && wedge)
969		ep->wedge = 1;
970	else if (!halt)
971		ep->wedge = 0;
972	spin_unlock_irqrestore(&ep->udc->lock, flags);
973
974	if (ep->ep_num == 0) {
975		udc->ep0_state = WAIT_FOR_SETUP;
976		udc->ep0_dir = EP_DIR_OUT;
977	}
978out:
979	return status;
980}
981
982static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
983{
984	return mv_ep_set_halt_wedge(_ep, halt, 0);
985}
986
987static int mv_ep_set_wedge(struct usb_ep *_ep)
988{
989	return mv_ep_set_halt_wedge(_ep, 1, 1);
990}
991
992static struct usb_ep_ops mv_ep_ops = {
993	.enable		= mv_ep_enable,
994	.disable	= mv_ep_disable,
995
996	.alloc_request	= mv_alloc_request,
997	.free_request	= mv_free_request,
998
999	.queue		= mv_ep_queue,
1000	.dequeue	= mv_ep_dequeue,
1001
1002	.set_wedge	= mv_ep_set_wedge,
1003	.set_halt	= mv_ep_set_halt,
1004	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
1005};
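/*
 * For reference, a gadget driver reaches these ops through the usb_ep
 * API; a minimal sketch (my_complete is a hypothetical callback, not
 * part of this driver):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */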
1006
1007static void udc_clock_enable(struct mv_udc *udc)
1008{
1009	unsigned int i;
1010
1011	for (i = 0; i < udc->clknum; i++)
1012		clk_enable(udc->clk[i]);
1013}
1014
1015static void udc_clock_disable(struct mv_udc *udc)
1016{
1017	unsigned int i;
1018
1019	for (i = 0; i < udc->clknum; i++)
1020		clk_disable(udc->clk[i]);
1021}
1022
1023static void udc_stop(struct mv_udc *udc)
1024{
1025	u32 tmp;
1026
1027	/* Disable interrupts */
1028	tmp = readl(&udc->op_regs->usbintr);
1029	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
1030		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
1031	writel(tmp, &udc->op_regs->usbintr);
1032
1033	/* Clear the Run bit in the command register to stop the controller */
1034	tmp = readl(&udc->op_regs->usbcmd);
1035	tmp &= ~USBCMD_RUN_STOP;
1036	writel(tmp, &udc->op_regs->usbcmd);
1037}
1038
1039static void udc_start(struct mv_udc *udc)
1040{
1041	u32 usbintr;
1042
1043	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1044		| USBINTR_PORT_CHANGE_DETECT_EN
1045		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1046	/* Enable interrupts */
1047	writel(usbintr, &udc->op_regs->usbintr);
1048
1049	/* Set the Run bit in the command register */
1050	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1051}
1052
1053static int udc_reset(struct mv_udc *udc)
1054{
1055	unsigned int loops;
1056	u32 tmp, portsc;
1057
1058	/* Stop the controller */
1059	tmp = readl(&udc->op_regs->usbcmd);
1060	tmp &= ~USBCMD_RUN_STOP;
1061	writel(tmp, &udc->op_regs->usbcmd);
1062
1063	/* Reset the controller to get default values */
1064	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1065
1066	/* wait for reset to complete */
1067	loops = LOOPS(RESET_TIMEOUT);
1068	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1069		if (loops == 0) {
1070			dev_err(&udc->dev->dev,
1071				"Wait for RESET completed TIMEOUT\n");
1072			return -ETIMEDOUT;
1073		}
1074		loops--;
1075		udelay(LOOPS_USEC);
1076	}
1077
1078	/* set controller to device mode */
1079	tmp = readl(&udc->op_regs->usbmode);
1080	tmp |= USBMODE_CTRL_MODE_DEVICE;
1081
1082	/* turn setup lockout off, require setup tripwire in usbcmd */
1083	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
1084
1085	writel(tmp, &udc->op_regs->usbmode);
1086
1087	writel(0x0, &udc->op_regs->epsetupstat);
1088
1089	/* Configure the Endpoint List Address */
1090	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1091		&udc->op_regs->eplistaddr);
1092
1093	portsc = readl(&udc->op_regs->portsc[0]);
1094	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1095		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);
1096
1097	if (udc->force_fs)
1098		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1099	else
1100		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1101
1102	writel(portsc, &udc->op_regs->portsc[0]);
1103
1104	tmp = readl(&udc->op_regs->epctrlx[0]);
1105	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1106	writel(tmp, &udc->op_regs->epctrlx[0]);
1107
1108	return 0;
1109}
1110
1111static int mv_udc_get_frame(struct usb_gadget *gadget)
1112{
1113	struct mv_udc *udc;
1114	u16	retval;
1115
1116	if (!gadget)
1117		return -ENODEV;
1118
1119	udc = container_of(gadget, struct mv_udc, gadget);
1120
1121	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1122
1123	return retval;
1124}
1125
1126/* Tries to wake up the host connected to this gadget */
1127static int mv_udc_wakeup(struct usb_gadget *gadget)
1128{
1129	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1130	u32 portsc;
1131
1132	/* Remote wakeup feature not enabled by host */
1133	if (!udc->remote_wakeup)
1134		return -ENOTSUPP;
1135
1136	portsc = readl(&udc->op_regs->portsc[0]);
1137	/* not suspended? */
1138	if (!(portsc & PORTSCX_PORT_SUSPEND))
1139		return 0;
1140	/* trigger force resume */
1141	portsc |= PORTSCX_PORT_FORCE_RESUME;
1142	writel(portsc, &udc->op_regs->portsc[0]);
1143	return 0;
1144}
1145
1146static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1147{
1148	struct mv_udc *udc;
1149	unsigned long flags;
1150
1151	udc = container_of(gadget, struct mv_udc, gadget);
1152	spin_lock_irqsave(&udc->lock, flags);
1153
1154	udc->softconnect = (is_on != 0);
1155	if (udc->driver && udc->softconnect)
1156		udc_start(udc);
1157	else
1158		udc_stop(udc);
1159
1160	spin_unlock_irqrestore(&udc->lock, flags);
1161	return 0;
1162}
1163
1164static int mv_udc_start(struct usb_gadget_driver *driver,
1165		int (*bind)(struct usb_gadget *));
1166static int mv_udc_stop(struct usb_gadget_driver *driver);
1167/* device controller usb_gadget_ops structure */
1168static const struct usb_gadget_ops mv_ops = {
1169
1170	/* returns the current frame number */
1171	.get_frame	= mv_udc_get_frame,
1172
1173	/* tries to wake up the host connected to this gadget */
1174	.wakeup		= mv_udc_wakeup,
1175
1176	/* D+ pullup, software-controlled connect/disconnect to USB host */
1177	.pullup		= mv_udc_pullup,
1178	.start		= mv_udc_start,
1179	.stop		= mv_udc_stop,
1180};
1181
1182static void mv_udc_testmode(struct mv_udc *udc, u16 index, bool enter)
1183{
1184	dev_info(&udc->dev->dev, "Test Mode is not support yet\n");
1185}
1186
1187static int eps_init(struct mv_udc *udc)
1188{
1189	struct mv_ep	*ep;
1190	char name[14];
1191	int i;
1192
1193	/* initialize ep0 */
1194	ep = &udc->eps[0];
1195	ep->udc = udc;
1196	strncpy(ep->name, "ep0", sizeof(ep->name));
1197	ep->ep.name = ep->name;
1198	ep->ep.ops = &mv_ep_ops;
1199	ep->wedge = 0;
1200	ep->stopped = 0;
1201	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1202	ep->ep_num = 0;
1203	ep->desc = &mv_ep0_desc;
1204	INIT_LIST_HEAD(&ep->queue);
1205
1206	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1207
1208	/* initialize other endpoints */
1209	for (i = 2; i < udc->max_eps * 2; i++) {
1210		ep = &udc->eps[i];
1211		if (i % 2) {
1212			snprintf(name, sizeof(name), "ep%din", i / 2);
1213			ep->direction = EP_DIR_IN;
1214		} else {
1215			snprintf(name, sizeof(name), "ep%dout", i / 2);
1216			ep->direction = EP_DIR_OUT;
1217		}
1218		ep->udc = udc;
1219		strncpy(ep->name, name, sizeof(ep->name));
1220		ep->ep.name = ep->name;
1221
1222		ep->ep.ops = &mv_ep_ops;
1223		ep->stopped = 0;
1224		ep->ep.maxpacket = (unsigned short) ~0;
1225		ep->ep_num = i / 2;
1226
1227		INIT_LIST_HEAD(&ep->queue);
1228		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1229
1230		ep->dqh = &udc->ep_dqh[i];
1231	}
1232
1233	return 0;
1234}
1235
1236/* delete all endpoint requests, called with spinlock held */
1237static void nuke(struct mv_ep *ep, int status)
1238{
1239	/* called with spinlock held */
1240	ep->stopped = 1;
1241
1242	/* endpoint fifo flush */
1243	mv_ep_fifo_flush(&ep->ep);
1244
1245	while (!list_empty(&ep->queue)) {
1246		struct mv_req *req = NULL;
1247		req = list_entry(ep->queue.next, struct mv_req, queue);
1248		done(ep, req, status);
1249	}
1250}
1251
1252/* stop all USB activities */
1253static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1254{
1255	struct mv_ep	*ep;
1256
1257	nuke(&udc->eps[0], -ESHUTDOWN);
1258
1259	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1260		nuke(ep, -ESHUTDOWN);
1261	}
1262
1263	/* report disconnect; the driver is already quiesced */
1264	if (driver) {
1265		spin_unlock(&udc->lock);
1266		driver->disconnect(&udc->gadget);
1267		spin_lock(&udc->lock);
1268	}
1269}
1270
1271static int mv_udc_start(struct usb_gadget_driver *driver,
1272		int (*bind)(struct usb_gadget *))
1273{
1274	struct mv_udc *udc = the_controller;
1275	int retval = 0;
1276	unsigned long flags;
1277
1278	if (!udc)
1279		return -ENODEV;
1280
1281	if (udc->driver)
1282		return -EBUSY;
1283
1284	spin_lock_irqsave(&udc->lock, flags);
1285
1286	/* hook up the driver ... */
1287	driver->driver.bus = NULL;
1288	udc->driver = driver;
1289	udc->gadget.dev.driver = &driver->driver;
1290
1291	udc->usb_state = USB_STATE_ATTACHED;
1292	udc->ep0_state = WAIT_FOR_SETUP;
1293	udc->ep0_dir = USB_DIR_OUT;
1294
1295	spin_unlock_irqrestore(&udc->lock, flags);
1296
1297	retval = bind(&udc->gadget);
1298	if (retval) {
1299		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
1300				driver->driver.name, retval);
1301		udc->driver = NULL;
1302		udc->gadget.dev.driver = NULL;
1303		return retval;
1304	}
1305	udc_reset(udc);
1306	ep0_reset(udc);
1307	udc_start(udc);
1308
1309	return 0;
1310}
1311
1312static int mv_udc_stop(struct usb_gadget_driver *driver)
1313{
1314	struct mv_udc *udc = the_controller;
1315	unsigned long flags;
1316
1317	if (!udc)
1318		return -ENODEV;
1319
1320	udc_stop(udc);
1321
1322	spin_lock_irqsave(&udc->lock, flags);
1323
1324	/* stop all usb activities */
1325	udc->gadget.speed = USB_SPEED_UNKNOWN;
1326	stop_activity(udc, driver);
1327	spin_unlock_irqrestore(&udc->lock, flags);
1328
1329	/* unbind gadget driver */
1330	driver->unbind(&udc->gadget);
1331	udc->gadget.dev.driver = NULL;
1332	udc->driver = NULL;
1333
1334	return 0;
1335}
1336
1337static int
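/*
 * Queue the ep0 status/handshake transfer: a 2-byte status value for
 * GET_STATUS (empty == false) or a zero-length packet otherwise.
 */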
1338udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
1339{
1340	int retval = 0;
1341	struct mv_req *req;
1342	struct mv_ep *ep;
1343
1344	ep = &udc->eps[0];
1345	udc->ep0_dir = direction;
1346	udc->ep0_state = WAIT_FOR_OUT_STATUS;
1347
1348	req = udc->status_req;
1349
1350	/* fill in the request structure */
1351	if (empty == false) {
1352		*((u16 *) req->req.buf) = cpu_to_le16(status);
1353		req->req.length = 2;
1354	} else
1355		req->req.length = 0;
1356
1357	req->ep = ep;
1358	req->req.status = -EINPROGRESS;
1359	req->req.actual = 0;
1360	req->req.complete = NULL;
1361	req->dtd_count = 0;
1362
1363	/* prime the data phase */
1364	if (!req_to_dtd(req))
1365		retval = queue_dtd(ep, req);
1366	else {	/* no mem */
1367		retval = -ENOMEM;
1368		goto out;
1369	}
1370
1371	if (retval) {
1372		dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
1373		goto out;
1374	}
1375
1376	list_add_tail(&req->queue, &ep->queue);
1377
1378	return 0;
1379out:
1380	return retval;
1381}
1382
1383static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1384{
1385	udc->dev_addr = (u8)setup->wValue;
1386
1387	/* update usb state */
1388	udc->usb_state = USB_STATE_ADDRESS;
1389
1390	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1391		ep0_stall(udc);
1392}
1393
1394static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1395	struct usb_ctrlrequest *setup)
1396{
1397	u16 status = 0;
1398	int retval;
1399
1400	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1401		!= (USB_DIR_IN | USB_TYPE_STANDARD))
1402		return;
1403
1404	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1405		status = 1 << USB_DEVICE_SELF_POWERED;
1406		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1407	} else if ((setup->bRequestType & USB_RECIP_MASK)
1408			== USB_RECIP_INTERFACE) {
1409		/* get interface status */
1410		status = 0;
1411	} else if ((setup->bRequestType & USB_RECIP_MASK)
1412			== USB_RECIP_ENDPOINT) {
1413		u8 ep_num, direction;
1414
1415		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1416		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1417				? EP_DIR_IN : EP_DIR_OUT;
1418		status = ep_is_stall(udc, ep_num, direction)
1419				<< USB_ENDPOINT_HALT;
1420	}
1421
1422	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1423	if (retval)
1424		ep0_stall(udc);
1425	else
1426		udc->ep0_state = DATA_STATE_XMIT;
1427}
1428
1429static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1430{
1431	u8 ep_num;
1432	u8 direction;
1433	struct mv_ep *ep;
1434
1435	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1436		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1437		switch (setup->wValue) {
1438		case USB_DEVICE_REMOTE_WAKEUP:
1439			udc->remote_wakeup = 0;
1440			break;
1441		case USB_DEVICE_TEST_MODE:
1442			mv_udc_testmode(udc, 0, false);
1443			break;
1444		default:
1445			goto out;
1446		}
1447	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1448		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1449		switch (setup->wValue) {
1450		case USB_ENDPOINT_HALT:
1451			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1452			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1453				? EP_DIR_IN : EP_DIR_OUT;
1454			if (setup->wValue != 0 || setup->wLength != 0
1455				|| ep_num >= udc->max_eps)
1456				goto out;
1457			ep = &udc->eps[ep_num * 2 + direction];
1458			if (ep->wedge == 1)
1459				break;
1460			spin_unlock(&udc->lock);
1461			ep_set_stall(udc, ep_num, direction, 0);
1462			spin_lock(&udc->lock);
1463			break;
1464		default:
1465			goto out;
1466		}
1467	} else
1468		goto out;
1469
1470	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1471		ep0_stall(udc);
1472out:
1473	return;
1474}
1475
1476static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1477{
1478	u8 ep_num;
1479	u8 direction;
1480
1481	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1482		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1483		switch (setup->wValue) {
1484		case USB_DEVICE_REMOTE_WAKEUP:
1485			udc->remote_wakeup = 1;
1486			break;
1487		case USB_DEVICE_TEST_MODE:
1488			if (setup->wIndex & 0xFF
1489				&& udc->gadget.speed != USB_SPEED_HIGH)
1490				goto out;
1491			if (udc->usb_state == USB_STATE_CONFIGURED
1492				|| udc->usb_state == USB_STATE_ADDRESS
1493				|| udc->usb_state == USB_STATE_DEFAULT)
1494				mv_udc_testmode(udc,
1495					setup->wIndex & 0xFF00, true);
1496			else
1497				goto out;
1498			break;
1499		default:
1500			goto out;
1501		}
1502	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1503		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1504		switch (setup->wValue) {
1505		case USB_ENDPOINT_HALT:
1506			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1507			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1508				? EP_DIR_IN : EP_DIR_OUT;
1509			if (setup->wValue != 0 || setup->wLength != 0
1510				|| ep_num >= udc->max_eps)
1511				goto out;
1512			spin_unlock(&udc->lock);
1513			ep_set_stall(udc, ep_num, direction, 1);
1514			spin_lock(&udc->lock);
1515			break;
1516		default:
1517			goto out;
1518		}
1519	} else
1520		goto out;
1521
1522	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1523		ep0_stall(udc);
1524out:
1525	return;
1526}
1527
1528static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
1529	struct usb_ctrlrequest *setup)
1530{
1531	bool delegate = false;
1532
1533	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
1534
1535	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1536			setup->bRequestType, setup->bRequest,
1537			setup->wValue, setup->wIndex, setup->wLength);
1538	/* We process some standard setup requests here */
1539	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1540		switch (setup->bRequest) {
1541		case USB_REQ_GET_STATUS:
1542			ch9getstatus(udc, ep_num, setup);
1543			break;
1544
1545		case USB_REQ_SET_ADDRESS:
1546			ch9setaddress(udc, setup);
1547			break;
1548
1549		case USB_REQ_CLEAR_FEATURE:
1550			ch9clearfeature(udc, setup);
1551			break;
1552
1553		case USB_REQ_SET_FEATURE:
1554			ch9setfeature(udc, setup);
1555			break;
1556
1557		default:
1558			delegate = true;
1559		}
1560	} else
1561		delegate = true;
1562
1563	/* delegate USB standard requests to the gadget driver */
1564	if (delegate == true) {
1565		/* USB requests handled by gadget */
1566		if (setup->wLength) {
1567			/* DATA phase from gadget, STATUS phase from udc */
1568			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1569					?  EP_DIR_IN : EP_DIR_OUT;
1570			spin_unlock(&udc->lock);
1571			if (udc->driver->setup(&udc->gadget,
1572				&udc->local_setup_buff) < 0)
1573				ep0_stall(udc);
1574			spin_lock(&udc->lock);
1575			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
1576					?  DATA_STATE_XMIT : DATA_STATE_RECV;
1577		} else {
1578			/* no DATA phase, IN STATUS phase from gadget */
1579			udc->ep0_dir = EP_DIR_IN;
1580			spin_unlock(&udc->lock);
1581			if (udc->driver->setup(&udc->gadget,
1582				&udc->local_setup_buff) < 0)
1583				ep0_stall(udc);
1584			spin_lock(&udc->lock);
1585			udc->ep0_state = WAIT_FOR_OUT_STATUS;
1586		}
1587	}
1588}
1589
1590/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
1591static void ep0_req_complete(struct mv_udc *udc,
1592	struct mv_ep *ep0, struct mv_req *req)
1593{
1594	u32 new_addr;
1595
1596	if (udc->usb_state == USB_STATE_ADDRESS) {
1597		/* set the new address */
1598		new_addr = (u32)udc->dev_addr;
1599		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1600			&udc->op_regs->deviceaddr);
1601	}
1602
1603	done(ep0, req, 0);
1604
1605	switch (udc->ep0_state) {
1606	case DATA_STATE_XMIT:
1607		/* receive status phase */
1608		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1609			ep0_stall(udc);
1610		break;
1611	case DATA_STATE_RECV:
1612		/* send status phase */
1613		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1614			ep0_stall(udc);
1615		break;
1616	case WAIT_FOR_OUT_STATUS:
1617		udc->ep0_state = WAIT_FOR_SETUP;
1618		break;
1619	case WAIT_FOR_SETUP:
1620		dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
1621		break;
1622	default:
1623		ep0_stall(udc);
1624		break;
1625	}
1626}
1627
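/*
 * Copy an 8-byte setup packet out of the dQH setup buffer using the
 * setup tripwire: set the tripwire, copy, and repeat if the hardware
 * cleared the tripwire because a new setup packet arrived mid-copy.
 */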
1628static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
1629{
1630	u32 temp;
1631	struct mv_dqh *dqh;
1632
1633	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
1634
1635	/* Clear bit in ENDPTSETUPSTAT */
1636	writel((1 << ep_num), &udc->op_regs->epsetupstat);
1637
1638	/* repeat while a hazard exists, i.e. a new setup packet may arrive */
1639	do {
1640		/* Set Setup Tripwire */
1641		temp = readl(&udc->op_regs->usbcmd);
1642		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1643
1644		/* Copy the setup packet to local buffer */
1645		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
1646	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
1647
1648	/* Clear Setup Tripwire */
1649	temp = readl(&udc->op_regs->usbcmd);
1650	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1651}
1652
1653static void irq_process_tr_complete(struct mv_udc *udc)
1654{
1655	u32 tmp, bit_pos;
1656	int i, ep_num = 0, direction = 0;
1657	struct mv_ep	*curr_ep;
1658	struct mv_req *curr_req, *temp_req;
1659	int status;
1660
1661	/*
1662	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
1663	 * because the setup packets are to be read ASAP
1664	 */
1665
1666	/* Process all Setup packet received interrupts */
1667	tmp = readl(&udc->op_regs->epsetupstat);
1668
1669	if (tmp) {
1670		for (i = 0; i < udc->max_eps; i++) {
1671			if (tmp & (1 << i)) {
1672				get_setup_data(udc, i,
1673					(u8 *)(&udc->local_setup_buff));
1674				handle_setup_packet(udc, i,
1675					&udc->local_setup_buff);
1676			}
1677		}
1678	}
1679
1680	/* Don't clear the endpoint setup status register here.
1681	 * It is cleared as a setup packet is read out of the buffer
1682	 */
1683
1684	/* Process non-setup transaction complete interrupts */
1685	tmp = readl(&udc->op_regs->epcomplete);
1686
1687	if (!tmp)
1688		return;
1689
1690	writel(tmp, &udc->op_regs->epcomplete);
1691
1692	for (i = 0; i < udc->max_eps * 2; i++) {
1693		ep_num = i >> 1;
1694		direction = i % 2;
1695
1696		bit_pos = 1 << (ep_num + 16 * direction);
1697
1698		if (!(bit_pos & tmp))
1699			continue;
1700
1701		if (i == 1)
1702			curr_ep = &udc->eps[0];
1703		else
1704			curr_ep = &udc->eps[i];
1705		/* process the req queue until we hit an incomplete request */
1706		list_for_each_entry_safe(curr_req, temp_req,
1707			&curr_ep->queue, queue) {
1708			status = process_ep_req(udc, i, curr_req);
1709			if (status)
1710				break;
1711
1712			/* write back status to req */
1713			curr_req->req.status = status;
1714
1715			/* ep0 request completion */
1716			if (ep_num == 0) {
1717				ep0_req_complete(udc, curr_ep, curr_req);
1718				break;
1719			} else {
1720				done(curr_ep, curr_req, status);
1721			}
1722		}
1723	}
1724}
1725
1726void irq_process_reset(struct mv_udc *udc)
1727{
1728	u32 tmp;
1729	unsigned int loops;
1730
1731	udc->ep0_dir = EP_DIR_OUT;
1732	udc->ep0_state = WAIT_FOR_SETUP;
1733	udc->remote_wakeup = 0;		/* default to 0 on reset */
1734
1735	/* The device address occupies bits 25-31; clear it */
1736	tmp = readl(&udc->op_regs->deviceaddr);
1737	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1738	writel(tmp, &udc->op_regs->deviceaddr);
1739
1740	/* Clear all the setup token semaphores */
1741	tmp = readl(&udc->op_regs->epsetupstat);
1742	writel(tmp, &udc->op_regs->epsetupstat);
1743
1744	/* Clear all the endpoint complete status bits */
1745	tmp = readl(&udc->op_regs->epcomplete);
1746	writel(tmp, &udc->op_regs->epcomplete);
1747
1748	/* wait until all endptprime bits cleared */
1749	loops = LOOPS(PRIME_TIMEOUT);
1750	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1751		if (loops == 0) {
1752			dev_err(&udc->dev->dev,
1753				"Timeout for ENDPTPRIME = 0x%x\n",
1754				readl(&udc->op_regs->epprime));
1755			break;
1756		}
1757		loops--;
1758		udelay(LOOPS_USEC);
1759	}
1760
1761	/* Write 1s to the Flush register */
1762	writel((u32)~0, &udc->op_regs->epflush);
1763
1764	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1765		dev_info(&udc->dev->dev, "usb bus reset\n");
1766		udc->usb_state = USB_STATE_DEFAULT;
1767		/* reset all the queues, stop all USB activities */
1768		stop_activity(udc, udc->driver);
1769	} else {
1770		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1771			readl(&udc->op_regs->portsc[0]));
1772
1773		/*
1774		 * re-initialize
1775		 * controller reset
1776		 */
1777		udc_reset(udc);
1778
1779		/* reset all the queues, stop all USB activities */
1780		stop_activity(udc, udc->driver);
1781
1782		/* reset ep0 dQH and endptctrl */
1783		ep0_reset(udc);
1784
1785		/* enable interrupt and set controller to run state */
1786		udc_start(udc);
1787
1788		udc->usb_state = USB_STATE_ATTACHED;
1789	}
1790}
1791
1792static void handle_bus_resume(struct mv_udc *udc)
1793{
1794	udc->usb_state = udc->resume_state;
1795	udc->resume_state = 0;
1796
1797	/* report resume to the driver */
1798	if (udc->driver) {
1799		if (udc->driver->resume) {
1800			spin_unlock(&udc->lock);
1801			udc->driver->resume(&udc->gadget);
1802			spin_lock(&udc->lock);
1803		}
1804	}
1805}
1806
1807static void irq_process_suspend(struct mv_udc *udc)
1808{
1809	udc->resume_state = udc->usb_state;
1810	udc->usb_state = USB_STATE_SUSPENDED;
1811
1812	if (udc->driver->suspend) {
1813		spin_unlock(&udc->lock);
1814		udc->driver->suspend(&udc->gadget);
1815		spin_lock(&udc->lock);
1816	}
1817}
1818
1819static void irq_process_port_change(struct mv_udc *udc)
1820{
1821	u32 portsc;
1822
1823	portsc = readl(&udc->op_regs->portsc[0]);
1824	if (!(portsc & PORTSCX_PORT_RESET)) {
1825		/* Get the speed */
1826		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1827		switch (speed) {
1828		case PORTSCX_PORT_SPEED_HIGH:
1829			udc->gadget.speed = USB_SPEED_HIGH;
1830			break;
1831		case PORTSCX_PORT_SPEED_FULL:
1832			udc->gadget.speed = USB_SPEED_FULL;
1833			break;
1834		case PORTSCX_PORT_SPEED_LOW:
1835			udc->gadget.speed = USB_SPEED_LOW;
1836			break;
1837		default:
1838			udc->gadget.speed = USB_SPEED_UNKNOWN;
1839			break;
1840		}
1841	}
1842
1843	if (portsc & PORTSCX_PORT_SUSPEND) {
1844		udc->resume_state = udc->usb_state;
1845		udc->usb_state = USB_STATE_SUSPENDED;
1846		if (udc->driver->suspend) {
1847			spin_unlock(&udc->lock);
1848			udc->driver->suspend(&udc->gadget);
1849			spin_lock(&udc->lock);
1850		}
1851	}
1852
1853	if (!(portsc & PORTSCX_PORT_SUSPEND)
1854		&& udc->usb_state == USB_STATE_SUSPENDED) {
1855		handle_bus_resume(udc);
1856	}
1857
1858	if (!udc->resume_state)
1859		udc->usb_state = USB_STATE_DEFAULT;
1860}
1861
1862static void irq_process_error(struct mv_udc *udc)
1863{
1864	/* Increment the error count */
1865	udc->errors++;
1866}
1867
1868static irqreturn_t mv_udc_irq(int irq, void *dev)
1869{
1870	struct mv_udc *udc = (struct mv_udc *)dev;
1871	u32 status, intr;
1872
1873	spin_lock(&udc->lock);
1874
1875	status = readl(&udc->op_regs->usbsts);
1876	intr = readl(&udc->op_regs->usbintr);
1877	status &= intr;
1878
1879	if (status == 0) {
1880		spin_unlock(&udc->lock);
1881		return IRQ_NONE;
1882	}
1883
1884	/* Clear all the interrupts that occurred */
1885	writel(status, &udc->op_regs->usbsts);
1886
1887	if (status & USBSTS_ERR)
1888		irq_process_error(udc);
1889
1890	if (status & USBSTS_RESET)
1891		irq_process_reset(udc);
1892
1893	if (status & USBSTS_PORT_CHANGE)
1894		irq_process_port_change(udc);
1895
1896	if (status & USBSTS_INT)
1897		irq_process_tr_complete(udc);
1898
1899	if (status & USBSTS_SUSPEND)
1900		irq_process_suspend(udc);
1901
1902	spin_unlock(&udc->lock);
1903
1904	return IRQ_HANDLED;
1905}
1906
1907/* release device structure */
1908static void gadget_release(struct device *_dev)
1909{
1910	struct mv_udc *udc = the_controller;
1911
1912	complete(udc->done);
1913}
1914
1915static int __devexit mv_udc_remove(struct platform_device *dev)
1916{
1917	struct mv_udc *udc = the_controller;
1918	int clk_i;
1919
1920	usb_del_gadget_udc(&udc->gadget);
1921
1922	/* free memory allocated in probe */
1923	if (udc->dtd_pool)
1924		dma_pool_destroy(udc->dtd_pool);
1925
1926	if (udc->ep_dqh)
1927		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
1928			udc->ep_dqh, udc->ep_dqh_dma);
1929
1930	kfree(udc->eps);
1931
1932	if (udc->irq)
1933		free_irq(udc->irq, udc);
1934
1935	if (udc->cap_regs)
1936		iounmap(udc->cap_regs);
1937	udc->cap_regs = NULL;
1938
1939	if (udc->phy_regs)
1940		iounmap((void *)udc->phy_regs);
1941	udc->phy_regs = 0;
1942
1943	if (udc->status_req) {
1944		kfree(udc->status_req->req.buf);
1945		kfree(udc->status_req);
1946	}
1947
1948	for (clk_i = 0; clk_i < udc->clknum; clk_i++)
1949		clk_put(udc->clk[clk_i]);
1950
1951	device_unregister(&udc->gadget.dev);
1952
1953	/* free dev; wait until release() has finished */
1954	wait_for_completion(udc->done);
1955	kfree(udc);
1956
1957	the_controller = NULL;
1958
1959	return 0;
1960}
1961
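/*
 * Probe: map the capability/operational register windows, allocate the
 * dQH array (two queue heads per hardware endpoint, OUT then IN) in
 * coherent DMA memory plus a dma_pool for dTDs, set up the mv_ep
 * bookkeeping, and register the gadget with the UDC core.
 */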
1962static int __devinit mv_udc_probe(struct platform_device *dev)
1963{
1964	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
1965	struct mv_udc *udc;
1966	int retval = 0;
1967	int clk_i = 0;
1968	struct resource *r;
1969	size_t size;
1970
1971	if (pdata == NULL) {
1972		dev_err(&dev->dev, "missing platform_data\n");
1973		return -ENODEV;
1974	}
1975
1976	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
1977	udc = kzalloc(size, GFP_KERNEL);
1978	if (udc == NULL) {
1979		dev_err(&dev->dev, "failed to allocate memory for udc\n");
1980		return -ENOMEM;
1981	}
1982
1983	the_controller = udc;
1984	udc->done = &release_done;
1985	udc->pdata = dev->dev.platform_data;
1986	spin_lock_init(&udc->lock);
1987
1988	udc->dev = dev;
1989
1990	udc->clknum = pdata->clknum;
1991	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
1992		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
1993		if (IS_ERR(udc->clk[clk_i])) {
1994			retval = PTR_ERR(udc->clk[clk_i]);
1995			goto err_put_clk;
1996		}
1997	}
1998
1999	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2000	if (r == NULL) {
2001		dev_err(&dev->dev, "no I/O memory resource defined\n");
2002		retval = -ENODEV;
2003		goto err_put_clk;
2004	}
2005
2006	udc->cap_regs = (struct mv_cap_regs __iomem *)
2007		ioremap(r->start, resource_size(r));
2008	if (udc->cap_regs == NULL) {
2009		dev_err(&dev->dev, "failed to map I/O memory\n");
2010		retval = -EBUSY;
2011		goto err_put_clk;
2012	}
2013
2014	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2015	if (r == NULL) {
2016		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
2017		retval = -ENODEV;
2018		goto err_iounmap_capreg;
2019	}
2020
2021	udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
2022	if (udc->phy_regs == 0) {
2023		dev_err(&dev->dev, "failed to map phy I/O memory\n");
2024		retval = -EBUSY;
2025		goto err_iounmap_capreg;
2026	}
2027
2028	/* we will access the controller registers, so enable the clock */
2029	udc_clock_enable(udc);
2030	if (pdata->phy_init) {
2031		retval = pdata->phy_init(udc->phy_regs);
2032		if (retval) {
2033			dev_err(&dev->dev, "phy init error %d\n", retval);
2034			goto err_iounmap_phyreg;
2035		}
2036	}
2037
2038	udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
2039		+ (readl(&udc->cap_regs->caplength_hciversion)
2040			& CAPLENGTH_MASK));
2041	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2042
2043	/*
2044	 * Some platforms use USB to download the kernel image and may not
2045	 * disconnect the usb gadget before the kernel boots, so stop the udc here.
2046	 */
2047	udc_stop(udc);
2048	writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2049
2050	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
2051	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2052	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
2053					&udc->ep_dqh_dma, GFP_KERNEL);
2054
2055	if (udc->ep_dqh == NULL) {
2056		dev_err(&dev->dev, "allocate dQH memory failed\n");
2057		retval = -ENOMEM;
2058		goto err_disable_clock;
2059	}
2060	udc->ep_dqh_size = size;
2061
2062	/* create dTD dma_pool resource */
2063	udc->dtd_pool = dma_pool_create("mv_dtd",
2064			&dev->dev,
2065			sizeof(struct mv_dtd),
2066			DTD_ALIGNMENT,
2067			DMA_BOUNDARY);
2068
2069	if (!udc->dtd_pool) {
2070		retval = -ENOMEM;
2071		goto err_free_dma;
2072	}
2073
2074	size = udc->max_eps * sizeof(struct mv_ep) * 2;
2075	udc->eps = kzalloc(size, GFP_KERNEL);
2076	if (udc->eps == NULL) {
2077		dev_err(&dev->dev, "allocate ep memory failed\n");
2078		retval = -ENOMEM;
2079		goto err_destroy_dma;
2080	}
2081
2082	/* initialize ep0 status request structure */
2083	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2084	if (!udc->status_req) {
2085		dev_err(&dev->dev, "allocate status_req memory failed\n");
2086		retval = -ENOMEM;
2087		goto err_free_eps;
2088	}
2089	INIT_LIST_HEAD(&udc->status_req->queue);
2090
2091	/* allocate a small amount of memory to get valid address */
2092	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2093	udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
2094
2095	udc->resume_state = USB_STATE_NOTATTACHED;
2096	udc->usb_state = USB_STATE_POWERED;
2097	udc->ep0_dir = EP_DIR_OUT;
2098	udc->remote_wakeup = 0;
2099
2100	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2101	if (r == NULL) {
2102		dev_err(&dev->dev, "no IRQ resource defined\n");
2103		retval = -ENODEV;
2104		goto err_free_status_req;
2105	}
2106	udc->irq = r->start;
2107	if (request_irq(udc->irq, mv_udc_irq,
2108		IRQF_SHARED, driver_name, udc)) {
2109		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
2110			udc->irq);
2111		retval = -ENODEV;
2112		goto err_free_status_req;
2113	}
2114
2115	/* initialize gadget structure */
2116	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
2117	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
2118	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
2119	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
2120	udc->gadget.is_dualspeed = 1;		/* support dual speed */
2121
2122	/* the "gadget" abstracts/virtualizes the controller */
2123	dev_set_name(&udc->gadget.dev, "gadget");
2124	udc->gadget.dev.parent = &dev->dev;
2125	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2126	udc->gadget.dev.release = gadget_release;
2127	udc->gadget.name = driver_name;		/* gadget name */
2128
2129	retval = device_register(&udc->gadget.dev);
2130	if (retval)
2131		goto err_free_irq;
2132
2133	eps_init(udc);
2134
2135	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
2136	if (retval)
2137		goto err_unregister;
2138
2139	return 0;
2140
2141err_unregister:
2142	device_unregister(&udc->gadget.dev);
2143err_free_irq:
2144	free_irq(udc->irq, udc);
2145err_free_status_req:
2146	kfree(udc->status_req->req.buf);
2147	kfree(udc->status_req);
2148err_free_eps:
2149	kfree(udc->eps);
2150err_destroy_dma:
2151	dma_pool_destroy(udc->dtd_pool);
2152err_free_dma:
2153	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2154			udc->ep_dqh, udc->ep_dqh_dma);
2155err_disable_clock:
2156	if (udc->pdata->phy_deinit)
2157		udc->pdata->phy_deinit(udc->phy_regs);
2158	udc_clock_disable(udc);
2159err_iounmap_phyreg:
2160	iounmap((void *)udc->phy_regs);
2161err_iounmap_capreg:
2162	iounmap(udc->cap_regs);
2163err_put_clk:
2164	for (clk_i--; clk_i >= 0; clk_i--)
2165		clk_put(udc->clk[clk_i]);
2166	the_controller = NULL;
2167	kfree(udc);
2168	return retval;
2169}
2170
2171#ifdef CONFIG_PM
2172static int mv_udc_suspend(struct device *_dev)
2173{
2174	struct mv_udc *udc = the_controller;
2175
2176	udc_stop(udc);
2177
2178	return 0;
2179}
2180
2181static int mv_udc_resume(struct device *_dev)
2182{
2183	struct mv_udc *udc = the_controller;
2184	int retval;
2185
2186	if (udc->pdata->phy_init) {
2187		retval = udc->pdata->phy_init(udc->phy_regs);
2188		if (retval) {
2189			dev_err(&udc->dev->dev,
2190				"init phy error %d when resume back\n",
2191				retval);
2192			return retval;
2193		}
2194	}
2195
2196	udc_reset(udc);
2197	ep0_reset(udc);
2198	udc_start(udc);
2199
2200	return 0;
2201}
2202
2203static const struct dev_pm_ops mv_udc_pm_ops = {
2204	.suspend	= mv_udc_suspend,
2205	.resume		= mv_udc_resume,
2206};
2207#endif
2208
2209static struct platform_driver udc_driver = {
2210	.probe		= mv_udc_probe,
2211	.remove		= __devexit_p(mv_udc_remove),
2212	.driver		= {
2213		.owner	= THIS_MODULE,
2214		.name	= "pxa-u2o",
2215#ifdef CONFIG_PM
2216		.pm	= &mv_udc_pm_ops,
2217#endif
2218	},
2219};
2220MODULE_ALIAS("platform:pxa-u2o");
2221
2222MODULE_DESCRIPTION(DRIVER_DESC);
2223MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2224MODULE_VERSION(DRIVER_VERSION);
2225MODULE_LICENSE("GPL");
2226
2227
2228static int __init init(void)
2229{
2230	return platform_driver_register(&udc_driver);
2231}
2232module_init(init);
2233
2234
2235static void __exit cleanup(void)
2236{
2237	platform_driver_unregister(&udc_driver);
2238}
2239module_exit(cleanup);
2240
2241