musb_host.c revision a2fd814e6a9e172f7077b68a2a9391bbde777a92
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"


/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - Still no traffic scheduling code to make NAKing for bulk or control
 *   transfers unable to starve other requests; or to make efficient use
 *   of hardware with periodic transfers.  (Note that network drivers
 *   commonly post bulk reads that stay pending for a long time; these
 *   would make very visible trouble.)
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 *
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it ... one remote device may easily be NAKing while others
 * need to perform transfers in that same direction.  The same thing could
 * be done in software though, assuming dma cooperates.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int is_out,
			u8 *buf, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			DBG(3, "Host TX FIFONOTEMPTY csr: %04x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

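/* Enable DMA requests on a TX endpoint.  With CPPI (and TUSB OMAP) DMA,
 * musb_ep_program() defers setting DMAENAB until the other CSRs are set
 * up, so the transfer actually starts here.
 */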
static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void			*buf;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		hw_ep->out_qh = qh;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		buf = urb->transfer_buffer;
		len = urb->transfer_buffer_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf, len);

	/* Configure endpoint */
	if (is_in || hw_ep->is_shared_fifo)
		hw_ep->in_qh = qh;
	else
		hw_ep->out_qh = qh;
	musb_ep_program(musb, epnum, urb, !is_in, buf, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		qh->iso_idx = 0;
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			cppi_host_txdma_start(hw_ep);
	}
}

/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
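	/* debug verbosity tracks the completion status:  normal completions
	 * log at level 4, common/boring faults at 3, anything else at 2
	 */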
	DBG(({ int level; switch (status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* for bulk/interrupt endpoints only */
static inline void
musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
{
	struct usb_device	*udev = urb->dev;
	u16			csr;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*qh;

	/* FIXME:  the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in || ep->is_shared_fifo)
		qh = ep->in_qh;
	else
		qh = ep->out_qh;

	if (!is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		usb_settoggle(udev, qh->epnum, 1,
			(csr & MUSB_TXCSR_H_DATATOGGLE)
				? 1 : 0);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		usb_settoggle(udev, qh->epnum, 0,
			(csr & MUSB_RXCSR_H_DATATOGGLE)
				? 1 : 0);
	}
}

/* caller owns controller lock, irqs are blocked */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	int			is_in;
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			ready = qh->is_ready;

	if (ep->is_shared_fifo)
		is_in = 1;
	else
		is_in = usb_pipein(urb->pipe);

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(ep, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* clobber old pointers to this qh */
		if (is_in || ep->is_shared_fifo)
			ep->in_qh = NULL;
		else
			ep->out_qh = NULL;
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* FALLTHROUGH */
		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			musb->periodic[ep->epnum] = NULL;
			kfree(qh);
			qh = NULL;
			break;
		}
	}
	return qh;
}

/*
 * Advance this hardware endpoint's queue, completing the specified urb and
 * advancing to either the next urb queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *musb, struct urb *urb,
		struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh	*qh;

	if (is_in || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	if (urb->status == -EINPROGRESS)
		qh = musb_giveback(qh, urb, 0);
	else
		qh = musb_giveback(qh, urb, urb->status);

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T',
				next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

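/*
 * Flush an RX FIFO without touching the data toggle; the final read
 * both flushes posted writes and returns the resulting RXCSR.
 */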
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			musb_writew(ep->regs, MUSB_TXCSR,
					MUSB_TXCSR_FRCDATATOG);
		}
		/* clear mode (and everything else) to enable Rx */
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}


/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int is_out,
			u8 *buf, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh;
	u16			packet_sz;

	if (!is_out || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev,
					qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			/* twice in case of double packet buffering */
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_writew(epio, MUSB_CSR0,
				csr | MUSB_CSR0_FLUSHFIFO);
			musb_writew(epio, MUSB_CSR0,
				csr | MUSB_CSR0_FLUSHFIFO);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma_channel) {

			/* clear previous state */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			qh->segsize = min(len, dma_channel->max_len);

			if (qh->segsize <= packet_sz)
				dma_channel->desired_mode = 0;
			else
				dma_channel->desired_mode = 1;

			if (dma_channel->desired_mode == 0) {
				csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE);
				csr |= (MUSB_TXCSR_DMAENAB);
					/* against programming guide */
			} else
				csr |= (MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_DMAMODE);

			musb_writew(epio, MUSB_TXCSR, csr);

			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					dma_channel->desired_mode,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				if (is_out)
					hw_ep->tx_channel = NULL;
				else
					hw_ep->rx_channel = NULL;
				dma_channel = NULL;
			}
		}
#endif

		/* candidate for DMA */
		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {

			/* program endpoint CSRs first, then setup DMA.
			 * assume CPPI setup succeeds.
			 * defer enabling dma.
			 */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* TX uses "rndis" mode automatically, but needs help
			 * to identify the zero-length-final-packet case.
			 */
			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					(urb->transfer_flags
							& URB_ZERO_PACKET)
						== URB_ZERO_PACKET,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				hw_ep->tx_channel = NULL;
				dma_channel = NULL;

				/* REVISIT there's an error path here that
				 * needs handling:  can't do dma, but
				 * there's no pio buffer address...
				 */
			}
		}

		if (load_count) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_AUTOSET);
			/* write CSR */
			csr |= MUSB_TXCSR_MODE;

			if (epnum)
				musb_writew(epio, MUSB_TXCSR, csr);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}


/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min(len, ((u16) (urb->transfer_buffer_length
					- urb->actual_length)));
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min(qh->maxpacket, ((u16)
				(urb->transfer_buffer_length
				- urb->actual_length)));

		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			csr |= MUSB_CSR0_FLUSHFIFO;
			musb_writew(epio, MUSB_CSR0, csr);
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
		musb_writew(epio, MUSB_CSR0, 0);

		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			wLength = 0;
	u8			*buf = NULL;
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
							    : hw_ep->out_qh;
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;

	urb = next_urb(qh);

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		goto finish;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE:  this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		goto finish;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		goto finish;
	}

	/* REVISIT this looks wrong... */
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			wLength = dma->actual_len;
		else
			wLength = qh->segsize;
		qh->offset += wLength;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = qh->segsize;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				buf = urb->transfer_buffer + d->offset;
				wLength = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				buf = urb->transfer_buffer
						+ qh->offset;
				wLength = urb->transfer_buffer_length
						- qh->offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);

	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
		/* WARN_ON(!buf); */

		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
		 * (and presumably, fifo is not half-full) we should write TWO
		 * packets before updating TXCSR ... other docs disagree ...
		 */
		/* PIO:  start next packet in this URB */
		wLength = min(qh->maxpacket, (u16) wLength);
		musb_write_fifo(hw_ep, wLength, buf);
		qh->segsize = wLength;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
	} else
		DBG(1, "not complete, but dma enabled?\n");

finish:
	return;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			/* NOTE this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) rx urb
			 * that could use this fifo.  (dma complicates it...)
			 *
			 * if (bulk && qh->ring.next != &musb->in_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			DBG(6, "RX end %d NAK timeout\n", epnum);
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS
					| MUSB_RXCSR_H_REQPKT);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (status == 0) {
						status = -EOVERFLOW;
						urb->error_count++;
					}
					DBG(2, "** OVERFLOW %d into %d\n",
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			DBG(6, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/* FIXME this doesn't consider direction, so it can only
	 * work for one half of the endpoint hardware, and assumes
	 * the previous cases handled all non-shared endpoints...
	 */

	/* we know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses, but for now musb->periodic is just an
	 * array of the _single_ logical endpoint associated with a
	 * given physical one (identity mapping logical->physical).
	 *
	 * that simplistic approach makes TT scheduling a lot simpler;
	 * there is none, and thus none of its complexity...
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
		int	diff;

		if (musb->periodic[epnum])
			continue;
		hw_ep = &musb->endpoints[epnum];
		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
		else
			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;

		if (diff >= 0 && best_diff > diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	musb->periodic[best_end] = qh;
	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
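	/* control/bulk qhs share fixed hardware and live on a ring;
	 * qh->mux records that sharing so musb_giveback() knows to
	 * advance along the ring when this qh's urb list empties.
	 */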
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}

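/*
 * Queue an URB: allocate and initialize a qh the first time its host
 * endpoint is used, then hand the qh to musb_schedule().
 */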
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh = hep->hcpriv;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	spin_unlock_irqrestore(&musb->lock, flags);
	if (ret)
		return ret;

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list ... so there's little to do unless hep wasn't
	 * yet scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh) {
		urb->hcpriv = qh;
		return 0;
	}

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);

	/* no high bandwidth support yet */
	if (qh->maxpacket & ~0x7ff) {
		ret = -EMSGSIZE;
		goto done;
	}

	qh->epnum = usb_endpoint_num(epd);
	qh->type = usb_endpoint_type(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
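	/* the two high bits encode bus speed: 0xc0 low, 0x80 full, 0x40 high */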
1860	case USB_SPEED_LOW:
1861		type_reg |= 0xc0;
1862		break;
1863	case USB_SPEED_FULL:
1864		type_reg |= 0x80;
1865		break;
1866	default:
1867		type_reg |= 0x40;
1868	}
1869	qh->type_reg = type_reg;
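	/* Decoded: bits 7:6 select the target speed (01 high, 10 full,
	 * 11 low), bits 5:4 the transfer protocol, and bits 3:0 the
	 * remote endpoint number.  A full speed bulk pipe to endpoint 2,
	 * for example, yields 0x80 | (2 << 4) | 2 == 0xa2.
	 */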
1870
1871	/* precompute rxinterval/txinterval register */
1872	interval = min((u8)16, epd->bInterval);	/* log encoding */
1873	switch (qh->type) {
1874	case USB_ENDPOINT_XFER_INT:
1875		/* fullspeed uses linear encoding */
1876		if (USB_SPEED_FULL == urb->dev->speed) {
1877			interval = epd->bInterval;
1878			if (!interval)
1879				interval = 1;
1880		}
1881		/* FALLTHROUGH */
1882	case USB_ENDPOINT_XFER_ISOC:
1883		/* iso always uses log encoding */
1884		break;
1885	default:
1886		/* REVISIT we actually want to use NAK limits, hinting to the
1887		 * transfer scheduling logic to try some other qh, e.g. try
1888		 * for 2 msec first:
1889		 *
1890		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
1891		 *
1892		 * The downside of disabling this is that transfer scheduling
1893		 * gets VERY unfair for nonperiodic transfers; a misbehaving
1894		 * peripheral could make that hurt.  Or for reads, one that's
1895		 * perfectly normal:  network and other drivers keep reads
1896		 * posted at all times, having one pending for a week should
1897		 * be perfectly safe.
1898		 *
1899		 * The upside of disabling it is that we can put off
1900		 * writing transfer scheduling code for a while.
1901		 */
1902		interval = 0;
1903	}
1904	qh->intv_reg = interval;
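	/* Worked example of the log encoding: a high speed interrupt
	 * endpoint with bInterval == 4 is polled every 2^(4-1) = 8
	 * microframes (1 msec); the register stores the raw exponent,
	 * clamped to 16 above.  Full speed interrupt is the one case
	 * here that stores a linear frame count instead.
	 */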
1905
1906	/* precompute addressing for external hub/tt ports */
1907	if (musb->is_multipoint) {
1908		struct usb_device	*parent = urb->dev->parent;
1909
1910		if (parent != hcd->self.root_hub) {
1911			qh->h_addr_reg = (u8) parent->devnum;
1912
1913			/* set up tt info if needed */
1914			if (urb->dev->tt) {
1915				qh->h_port_reg = (u8) urb->dev->ttport;
1916				if (urb->dev->tt->hub)
1917					qh->h_addr_reg =
1918						(u8) urb->dev->tt->hub->devnum;
1919				if (urb->dev->tt->multi)
1920					qh->h_addr_reg |= 0x80;
1921			}
1922		}
1923	}
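	/* For illustration: a full speed device on port 3 of a multi-TT
	 * high speed hub at address 2 gets h_addr_reg == 0x82 (the hub
	 * address with the multi-TT flag in bit 7) and h_port_reg == 3,
	 * steering its packets through the right transaction translator.
	 */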
1924
1925	/* Invariant: hep->hcpriv is null OR the qh that's already scheduled.
1926	 * Until we get real dma queues (with an entry for each urb/buffer),
1927	 * we only have work to do in the former case.
1928	 */
1929	spin_lock_irqsave(&musb->lock, flags);
1930	if (hep->hcpriv) {
1931		/* some concurrent activity submitted another urb to hep...
1932		 * odd, rare, error prone, but legal.
1933		 */
1934		kfree(qh);
1935		ret = 0;
1936	} else
1937		ret = musb_schedule(musb, qh,
1938				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
1939
1940	if (ret == 0) {
1941		urb->hcpriv = qh;
1942		/* FIXME set urb->start_frame for iso/intr, it's tested in
1943		 * musb_start_urb(), but otherwise only konicawc cares ...
1944		 */
1945	}
1946	spin_unlock_irqrestore(&musb->lock, flags);
1947
1948done:
1949	if (ret != 0) {
1950		spin_lock_irqsave(&musb->lock, flags);
1951		usb_hcd_unlink_urb_from_ep(hcd, urb);
1952		spin_unlock_irqrestore(&musb->lock, flags);
1953		kfree(qh);
1954	}
1955	return ret;
1956}
1957
1958
1959/*
1960 * Abort a transfer that's at the head of a hardware queue.
1961 * Called with the controller locked, irqs blocked.
1962 * The hardware queue advances to the next transfer, unless prevented.
1963 */
1964static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
1965{
1966	struct musb_hw_ep	*ep = qh->hw_ep;
1967	void __iomem		*epio = ep->regs;
1968	unsigned		hw_end = ep->epnum;
1969	void __iomem		*regs = ep->musb->mregs;
1970	u16			csr;
1971	int			status = 0;
1972
1973	musb_ep_select(regs, hw_end);
1974
1975	if (is_dma_capable()) {
1976		struct dma_channel	*dma;
1977
1978		dma = is_in ? ep->rx_channel : ep->tx_channel;
1979		if (dma) {
1980			status = ep->musb->dma_controller->channel_abort(dma);
1981			DBG(status ? 1 : 3,
1982				"abort %cX%d DMA for urb %p --> %d\n",
1983				is_in ? 'R' : 'T', ep->epnum,
1984				urb, status);
1985			urb->actual_length += dma->actual_len;
1986		}
1987	}
1988
1989	/* turn off DMA requests, discard state, stop polling ... */
1990	if (is_in) {
1991		/* giveback saves bulk toggle */
1992		csr = musb_h_flush_rxfifo(ep, 0);
1993
1994		/* REVISIT we still get an irq; should likely clear the
1995		 * endpoint's irq status here to avoid bogus irqs.
1996		 * clearing that status is platform-specific...
1997		 */
1998	} else {
1999		musb_h_tx_flush_fifo(ep);
2000		csr = musb_readw(epio, MUSB_TXCSR);
2001		csr &= ~(MUSB_TXCSR_AUTOSET
2002			| MUSB_TXCSR_DMAENAB
2003			| MUSB_TXCSR_H_RXSTALL
2004			| MUSB_TXCSR_H_NAKTIMEOUT
2005			| MUSB_TXCSR_H_ERROR
2006			| MUSB_TXCSR_TXPKTRDY);
2007		musb_writew(epio, MUSB_TXCSR, csr);
2008		/* REVISIT may need to clear FLUSHFIFO ... */
2009		musb_writew(epio, MUSB_TXCSR, csr);
2010		/* flush cpu writebuffer */
2011		csr = musb_readw(epio, MUSB_TXCSR);
2012	}
2013	if (status == 0)
2014		musb_advance_schedule(ep->musb, urb, ep, is_in);
2015	return status;
2016}
2017
2018static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2019{
2020	struct musb		*musb = hcd_to_musb(hcd);
2021	struct musb_qh		*qh;
2022	struct list_head	*sched;
2023	unsigned long		flags;
2024	int			ret;
2025
2026	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
2027			usb_pipedevice(urb->pipe),
2028			usb_pipeendpoint(urb->pipe),
2029			usb_pipein(urb->pipe) ? "in" : "out");
2030
2031	spin_lock_irqsave(&musb->lock, flags);
2032	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2033	if (ret)
2034		goto done;
2035
2036	qh = urb->hcpriv;
2037	if (!qh)
2038		goto done;
2039
2040	/* Any URB not actively programmed into endpoint hardware can be
2041	 * immediately given back; that's any URB not at the head of an
2042	 * endpoint queue, unless someday we get real DMA queues.  And even
2043	 * if it's at the head, it might not be known to the hardware...
2044	 *
2045	 * Otherwise abort current transfer, pending dma, etc.; urb->status
2046	 * has already been updated.  This is a synchronous abort; it'd be
2047	 * OK to hold off until after some IRQ, though.
2048	 */
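	/* usbcore keeps hep->urb_list in submission order, so only the
	 * URB at its head can be on the hardware: that head URB has
	 * urb_list.prev pointing back at the list head itself, and any
	 * other value means this URB was never programmed and can just
	 * be unlinked.
	 */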
2049	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
2050		ret = -EINPROGRESS;
2051	else {
2052		switch (qh->type) {
2053		case USB_ENDPOINT_XFER_CONTROL:
2054			sched = &musb->control;
2055			break;
2056		case USB_ENDPOINT_XFER_BULK:
2057			if (qh->mux == 1) {
2058				if (usb_pipein(urb->pipe))
2059					sched = &musb->in_bulk;
2060				else
2061					sched = &musb->out_bulk;
2062				break;
2063			}
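			/* else: FALLTHROUGH */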
2064		default:
2065			/* REVISIT when we get a schedule tree, periodic
2066			 * transfers won't always be at the head of a
2067			 * singleton queue...
2068			 */
2069			sched = NULL;
2070			break;
2071		}
2072	}
2073
2074	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
2075	if (ret < 0 || (sched && qh != first_qh(sched))) {
2076		int	ready = qh->is_ready;
2077
2078		ret = 0;
2079		qh->is_ready = 0;
2080		__musb_giveback(musb, urb, 0);
2081		qh->is_ready = ready;
2082
2083		/* If nothing else (usually musb_giveback) is using it
2084		 * and its URB list has emptied, recycle this qh.
2085		 */
2086		if (ready && list_empty(&qh->hep->urb_list)) {
2087			qh->hep->hcpriv = NULL;
2088			list_del(&qh->ring);
2089			kfree(qh);
2090		}
2091	} else
2092		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2093done:
2094	spin_unlock_irqrestore(&musb->lock, flags);
2095	return ret;
2096}
2097
2098/* disable an endpoint */
2099static void
2100musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2101{
2102	u8			epnum = hep->desc.bEndpointAddress;
2103	unsigned long		flags;
2104	struct musb		*musb = hcd_to_musb(hcd);
2105	u8			is_in = epnum & USB_DIR_IN;
2106	struct musb_qh		*qh = hep->hcpriv;
2107	struct urb		*urb;
2108	struct list_head	*sched;
2109
2110	if (!qh)
2111		return;
2112
2113	spin_lock_irqsave(&musb->lock, flags);
2114
2115	switch (qh->type) {
2116	case USB_ENDPOINT_XFER_CONTROL:
2117		sched = &musb->control;
2118		break;
2119	case USB_ENDPOINT_XFER_BULK:
2120		if (qh->mux == 1) {
2121			if (is_in)
2122				sched = &musb->in_bulk;
2123			else
2124				sched = &musb->out_bulk;
2125			break;
2126		}
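		/* else: FALLTHROUGH */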
2127	default:
2128		/* REVISIT when we get a schedule tree, periodic transfers
2129		 * won't always be at the head of a singleton queue...
2130		 */
2131		sched = NULL;
2132		break;
2133	}
2134
2135	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
2136
2137	/* kick first urb off the hardware, if needed */
2138	qh->is_ready = 0;
2139	if (!sched || qh == first_qh(sched)) {
2140		urb = next_urb(qh);
2141
2142		/* make software (then hardware) stop ASAP */
2143		if (!urb->unlinked)
2144			urb->status = -ESHUTDOWN;
2145
2146		/* cleanup */
2147		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2148	}
2149
2150	/* then just nuke all the others; the first urb may already be
2151	 * off hep->urb_list, so restart from the head each time */
2152	while (!list_empty(&hep->urb_list))
2153		musb_giveback(qh, next_urb(qh), -ESHUTDOWN);
2154
2155	spin_unlock_irqrestore(&musb->lock, flags);
2156}
2157
2158static int musb_h_get_frame_number(struct usb_hcd *hcd)
2159{
2160	struct musb	*musb = hcd_to_musb(hcd);
2161
2162	return musb_readw(musb->mregs, MUSB_FRAME);
2163}
2164
2165static int musb_h_start(struct usb_hcd *hcd)
2166{
2167	struct musb	*musb = hcd_to_musb(hcd);
2168
2169	/* NOTE: musb_start() is called when the hub driver turns
2170	 * on port power, or when (OTG) peripheral starts.
2171	 */
2172	hcd->state = HC_STATE_RUNNING;
2173	musb->port1_status = 0;
2174	return 0;
2175}
2176
2177static void musb_h_stop(struct usb_hcd *hcd)
2178{
2179	musb_stop(hcd_to_musb(hcd));
2180	hcd->state = HC_STATE_HALT;
2181}
2182
2183static int musb_bus_suspend(struct usb_hcd *hcd)
2184{
2185	struct musb	*musb = hcd_to_musb(hcd);
2186
2187	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
2188		return 0;
2189
2190	if (is_host_active(musb) && musb->is_active) {
2191		WARNING("trying to suspend as %s is_active=%i\n",
2192			otg_state_string(musb), musb->is_active);
2193		return -EBUSY;
2194	} else
2195		return 0;
2196}
2197
2198static int musb_bus_resume(struct usb_hcd *hcd)
2199{
2200	/* resuming child port does the work */
2201	return 0;
2202}
2203
2204const struct hc_driver musb_hc_driver = {
2205	.description		= "musb-hcd",
2206	.product_desc		= "MUSB HDRC host driver",
2207	.hcd_priv_size		= sizeof(struct musb),
2208	.flags			= HCD_USB2 | HCD_MEMORY,
2209
2210	/* not using irq handler or reset hooks from usbcore, since
2211	 * those must be shared with peripheral code for OTG configs
2212	 */
2213
2214	.start			= musb_h_start,
2215	.stop			= musb_h_stop,
2216
2217	.get_frame_number	= musb_h_get_frame_number,
2218
2219	.urb_enqueue		= musb_urb_enqueue,
2220	.urb_dequeue		= musb_urb_dequeue,
2221	.endpoint_disable	= musb_h_disable,
2222
2223	.hub_status_data	= musb_hub_status_data,
2224	.hub_control		= musb_hub_control,
2225	.bus_suspend		= musb_bus_suspend,
2226	.bus_resume		= musb_bus_resume,
2227	/* .start_port_reset	= NULL, */
2228	/* .hub_irq_enable	= NULL, */
2229};
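
/* A glue layer hands this hc_driver to usbcore along these lines
 * (sketch; the real call sites live in musb_core.c, using the
 * controller's device and IRQ):
 *
 *	struct usb_hcd *hcd = usb_create_hcd(&musb_hc_driver,
 *			dev, dev_name(dev));
 *	if (!hcd)
 *		return -ENOMEM;
 *	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
 */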
2230