musb_host.c revision 5d67a851bca63d30cde0474bfc4fc4f03db1a1b8
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"


/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - Still no traffic scheduling code to make NAKing for bulk or control
 *   transfers unable to starve other requests; or to make efficient use
 *   of hardware with periodic transfers.  (Note that network drivers
 *   commonly post bulk reads that stay pending for a long time; these
 *   would make very visible trouble.)
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use, enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses:  (a) ping to davinci, even "ping -f",
 *   is fine; but (b) ping _from_ davinci, even "ping -c 1", loses ICMP RX
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 *
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it ... one remote device may easily be NAKing while others
 * need to perform transfers in that same direction.  The same thing could
 * be done in software though, assuming dma cooperates.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
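
/*
 * A minimal sketch of the endpoint-usage policy above (hypothetical
 * helper, not part of this driver; the real decisions are made in
 * musb_schedule() below): which kind of hardware endpoint each
 * transfer type is steered to.
 */
static inline const char *example_ep_policy(u8 xfertype)
{
	switch (xfertype) {
	case USB_ENDPOINT_XFER_CONTROL:
		return "ep0";			/* all control traffic */
	case USB_ENDPOINT_XFER_BULK:
		return "dedicated bulk IN/OUT";	/* the "async" queues */
	default:
		return "claimed periodic ep";	/* one int/iso qh apiece */
	}
}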

static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int nOut,
			u8 *buf, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
167 * 168 * Context: controller locked, irqs blocked 169 */ 170static void 171musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) 172{ 173 u16 frame; 174 u32 len; 175 void *buf; 176 void __iomem *mbase = musb->mregs; 177 struct urb *urb = next_urb(qh); 178 struct musb_hw_ep *hw_ep = qh->hw_ep; 179 unsigned pipe = urb->pipe; 180 u8 address = usb_pipedevice(pipe); 181 int epnum = hw_ep->epnum; 182 183 /* initialize software qh state */ 184 qh->offset = 0; 185 qh->segsize = 0; 186 187 /* gather right source of data */ 188 switch (qh->type) { 189 case USB_ENDPOINT_XFER_CONTROL: 190 /* control transfers always start with SETUP */ 191 is_in = 0; 192 hw_ep->out_qh = qh; 193 musb->ep0_stage = MUSB_EP0_START; 194 buf = urb->setup_packet; 195 len = 8; 196 break; 197 case USB_ENDPOINT_XFER_ISOC: 198 qh->iso_idx = 0; 199 qh->frame = 0; 200 buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset; 201 len = urb->iso_frame_desc[0].length; 202 break; 203 default: /* bulk, interrupt */ 204 buf = urb->transfer_buffer; 205 len = urb->transfer_buffer_length; 206 } 207 208 DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", 209 qh, urb, address, qh->epnum, 210 is_in ? "in" : "out", 211 ({char *s; switch (qh->type) { 212 case USB_ENDPOINT_XFER_CONTROL: s = ""; break; 213 case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; 214 case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; 215 default: s = "-intr"; break; 216 }; s; }), 217 epnum, buf, len); 218 219 /* Configure endpoint */ 220 if (is_in || hw_ep->is_shared_fifo) 221 hw_ep->in_qh = qh; 222 else 223 hw_ep->out_qh = qh; 224 musb_ep_program(musb, epnum, urb, !is_in, buf, len); 225 226 /* transmit may have more work: start it when it is time */ 227 if (is_in) 228 return; 229 230 /* determine if the time is right for a periodic transfer */ 231 switch (qh->type) { 232 case USB_ENDPOINT_XFER_ISOC: 233 case USB_ENDPOINT_XFER_INT: 234 DBG(3, "check whether there's still time for periodic Tx\n"); 235 qh->iso_idx = 0; 236 frame = musb_readw(mbase, MUSB_FRAME); 237 /* FIXME this doesn't implement that scheduling policy ... 238 * or handle framecounter wrapping 239 */ 240 if ((urb->transfer_flags & URB_ISO_ASAP) 241 || (frame >= urb->start_frame)) { 242 /* REVISIT the SOF irq handler shouldn't duplicate 243 * this code; and we don't init urb->start_frame... 244 */ 245 qh->frame = 0; 246 goto start; 247 } else { 248 qh->frame = urb->start_frame; 249 /* enable SOF interrupt so we can count down */ 250 DBG(1, "SOF for %d\n", epnum); 251#if 1 /* ifndef CONFIG_ARCH_DAVINCI */ 252 musb_writeb(mbase, MUSB_INTRUSBE, 0xff); 253#endif 254 } 255 break; 256 default: 257start: 258 DBG(4, "Start TX%d %s\n", epnum, 259 hw_ep->tx_channel ? "dma" : "pio"); 260 261 if (!hw_ep->tx_channel) 262 musb_h_tx_start(hw_ep); 263 else if (is_cppi_enabled() || tusb_dma_omap()) 264 cppi_host_txdma_start(hw_ep); 265 } 266} 267 268/* caller owns controller lock, irqs are blocked */ 269static void 270__musb_giveback(struct musb *musb, struct urb *urb, int status) 271__releases(musb->lock) 272__acquires(musb->lock) 273{ 274 DBG(({ int level; switch (status) { 275 case 0: 276 level = 4; 277 break; 278 /* common/boring faults */ 279 case -EREMOTEIO: 280 case -ESHUTDOWN: 281 case -ECONNRESET: 282 case -EPIPE: 283 level = 3; 284 break; 285 default: 286 level = 2; 287 break; 288 }; level; }), 289 "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", 290 urb, urb->complete, status, 291 usb_pipedevice(urb->pipe), 292 usb_pipeendpoint(urb->pipe), 293 usb_pipein(urb->pipe) ? 
"in" : "out", 294 urb->actual_length, urb->transfer_buffer_length 295 ); 296 297 usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); 298 spin_unlock(&musb->lock); 299 usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); 300 spin_lock(&musb->lock); 301} 302 303/* for bulk/interrupt endpoints only */ 304static inline void 305musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb) 306{ 307 struct usb_device *udev = urb->dev; 308 u16 csr; 309 void __iomem *epio = ep->regs; 310 struct musb_qh *qh; 311 312 /* FIXME: the current Mentor DMA code seems to have 313 * problems getting toggle correct. 314 */ 315 316 if (is_in || ep->is_shared_fifo) 317 qh = ep->in_qh; 318 else 319 qh = ep->out_qh; 320 321 if (!is_in) { 322 csr = musb_readw(epio, MUSB_TXCSR); 323 usb_settoggle(udev, qh->epnum, 1, 324 (csr & MUSB_TXCSR_H_DATATOGGLE) 325 ? 1 : 0); 326 } else { 327 csr = musb_readw(epio, MUSB_RXCSR); 328 usb_settoggle(udev, qh->epnum, 0, 329 (csr & MUSB_RXCSR_H_DATATOGGLE) 330 ? 1 : 0); 331 } 332} 333 334/* caller owns controller lock, irqs are blocked */ 335static struct musb_qh * 336musb_giveback(struct musb_qh *qh, struct urb *urb, int status) 337{ 338 struct musb_hw_ep *ep = qh->hw_ep; 339 struct musb *musb = ep->musb; 340 int is_in = usb_pipein(urb->pipe); 341 int ready = qh->is_ready; 342 343 /* save toggle eagerly, for paranoia */ 344 switch (qh->type) { 345 case USB_ENDPOINT_XFER_BULK: 346 case USB_ENDPOINT_XFER_INT: 347 musb_save_toggle(ep, is_in, urb); 348 break; 349 case USB_ENDPOINT_XFER_ISOC: 350 if (status == 0 && urb->error_count) 351 status = -EXDEV; 352 break; 353 } 354 355 qh->is_ready = 0; 356 __musb_giveback(musb, urb, status); 357 qh->is_ready = ready; 358 359 /* reclaim resources (and bandwidth) ASAP; deschedule it, and 360 * invalidate qh as soon as list_empty(&hep->urb_list) 361 */ 362 if (list_empty(&qh->hep->urb_list)) { 363 struct list_head *head; 364 365 if (is_in) 366 ep->rx_reinit = 1; 367 else 368 ep->tx_reinit = 1; 369 370 /* clobber old pointers to this qh */ 371 if (is_in || ep->is_shared_fifo) 372 ep->in_qh = NULL; 373 else 374 ep->out_qh = NULL; 375 qh->hep->hcpriv = NULL; 376 377 switch (qh->type) { 378 379 case USB_ENDPOINT_XFER_CONTROL: 380 case USB_ENDPOINT_XFER_BULK: 381 /* fifo policy for these lists, except that NAKing 382 * should rotate a qh to the end (for fairness). 383 */ 384 if (qh->mux == 1) { 385 head = qh->ring.prev; 386 list_del(&qh->ring); 387 kfree(qh); 388 qh = first_qh(head); 389 break; 390 } 391 392 case USB_ENDPOINT_XFER_ISOC: 393 case USB_ENDPOINT_XFER_INT: 394 /* this is where periodic bandwidth should be 395 * de-allocated if it's tracked and allocated; 396 * and where we'd update the schedule tree... 397 */ 398 kfree(qh); 399 qh = NULL; 400 break; 401 } 402 } 403 return qh; 404} 405 406/* 407 * Advance this hardware endpoint's queue, completing the specified urb and 408 * advancing to either the next urb queued to that qh, or else invalidating 409 * that qh and advancing to the next qh scheduled after the current one. 410 * 411 * Context: caller owns controller lock, irqs are blocked 412 */ 413static void 414musb_advance_schedule(struct musb *musb, struct urb *urb, 415 struct musb_hw_ep *hw_ep, int is_in) 416{ 417 struct musb_qh *qh; 418 419 if (is_in || hw_ep->is_shared_fifo) 420 qh = hw_ep->in_qh; 421 else 422 qh = hw_ep->out_qh; 423 424 if (urb->status == -EINPROGRESS) 425 qh = musb_giveback(qh, urb, 0); 426 else 427 qh = musb_giveback(qh, urb, urb->status); 428 429 if (qh != NULL && qh->is_ready) { 430 DBG(4, "... 

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
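
/*
 * Sketch of the short-read status rule applied above (hypothetical
 * helper, for illustration only): a short IN transfer is normal
 * termination unless the URB set URB_SHORT_NOT_OK, in which case it
 * completes with -EREMOTEIO.
 */
static inline int example_short_read_status(struct urb *urb)
{
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
			&& urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}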
555 * 556 * it's possible that driver bugs (especially for DMA) or aborting a 557 * transfer might have left the endpoint busier than it should be. 558 * the busy/not-empty tests are basically paranoia. 559 */ 560static void 561musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) 562{ 563 u16 csr; 564 565 /* NOTE: we know the "rx" fifo reinit never triggers for ep0. 566 * That always uses tx_reinit since ep0 repurposes TX register 567 * offsets; the initial SETUP packet is also a kind of OUT. 568 */ 569 570 /* if programmed for Tx, put it in RX mode */ 571 if (ep->is_shared_fifo) { 572 csr = musb_readw(ep->regs, MUSB_TXCSR); 573 if (csr & MUSB_TXCSR_MODE) { 574 musb_h_tx_flush_fifo(ep); 575 musb_writew(ep->regs, MUSB_TXCSR, 576 MUSB_TXCSR_FRCDATATOG); 577 } 578 /* clear mode (and everything else) to enable Rx */ 579 musb_writew(ep->regs, MUSB_TXCSR, 0); 580 581 /* scrub all previous state, clearing toggle */ 582 } else { 583 csr = musb_readw(ep->regs, MUSB_RXCSR); 584 if (csr & MUSB_RXCSR_RXPKTRDY) 585 WARNING("rx%d, packet/%d ready?\n", ep->epnum, 586 musb_readw(ep->regs, MUSB_RXCOUNT)); 587 588 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); 589 } 590 591 /* target addr and (for multipoint) hub addr/port */ 592 if (musb->is_multipoint) { 593 musb_write_rxfunaddr(ep->target_regs, qh->addr_reg); 594 musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg); 595 musb_write_rxhubport(ep->target_regs, qh->h_port_reg); 596 597 } else 598 musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); 599 600 /* protocol/endpoint, interval/NAKlimit, i/o size */ 601 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); 602 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); 603 /* NOTE: bulk combining rewrites high bits of maxpacket */ 604 musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket); 605 606 ep->rx_reinit = 0; 607} 608 609 610/* 611 * Program an HDRC endpoint as per the given URB 612 * Context: irqs blocked, controller lock held 613 */ 614static void musb_ep_program(struct musb *musb, u8 epnum, 615 struct urb *urb, unsigned int is_out, 616 u8 *buf, u32 len) 617{ 618 struct dma_controller *dma_controller; 619 struct dma_channel *dma_channel; 620 u8 dma_ok; 621 void __iomem *mbase = musb->mregs; 622 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 623 void __iomem *epio = hw_ep->regs; 624 struct musb_qh *qh; 625 u16 packet_sz; 626 627 if (!is_out || hw_ep->is_shared_fifo) 628 qh = hw_ep->in_qh; 629 else 630 qh = hw_ep->out_qh; 631 632 packet_sz = qh->maxpacket; 633 634 DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s " 635 "h_addr%02x h_port%02x bytes %d\n", 636 is_out ? "-->" : "<--", 637 epnum, urb, urb->dev->speed, 638 qh->addr_reg, qh->epnum, is_out ? "out" : "in", 639 qh->h_addr_reg, qh->h_port_reg, 640 len); 641 642 musb_ep_select(mbase, epnum); 643 644 /* candidate for DMA? */ 645 dma_controller = musb->dma_controller; 646 if (is_dma_capable() && epnum && dma_controller) { 647 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; 648 if (!dma_channel) { 649 dma_channel = dma_controller->channel_alloc( 650 dma_controller, hw_ep, is_out); 651 if (is_out) 652 hw_ep->tx_channel = dma_channel; 653 else 654 hw_ep->rx_channel = dma_channel; 655 } 656 } else 657 dma_channel = NULL; 658 659 /* make sure we clear DMAEnab, autoSet bits from previous run */ 660 661 /* OUT/transmit/EP0 or IN/receive? 
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev,
					qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			/* twice in case of double packet buffering */
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_writew(epio, MUSB_CSR0,
					csr | MUSB_CSR0_FLUSHFIFO);
			musb_writew(epio, MUSB_CSR0,
					csr | MUSB_CSR0_FLUSHFIFO);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma_channel) {

			/* clear previous state */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			qh->segsize = min(len, dma_channel->max_len);

			if (qh->segsize <= packet_sz)
				dma_channel->desired_mode = 0;
			else
				dma_channel->desired_mode = 1;


			if (dma_channel->desired_mode == 0) {
				csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE);
				csr |= (MUSB_TXCSR_DMAENAB);
					/* against programming guide */
			} else
				csr |= (MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_DMAMODE);

			musb_writew(epio, MUSB_TXCSR, csr);

			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					dma_channel->desired_mode,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				if (is_out)
					hw_ep->tx_channel = NULL;
				else
					hw_ep->rx_channel = NULL;
				dma_channel = NULL;
			}
		}
#endif

		/* candidate for DMA */
		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {

			/* program endpoint CSRs first, then setup DMA.
			 * assume CPPI setup succeeds.
			 * defer enabling dma.
			 */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* TX uses "rndis" mode automatically, but needs help
			 * to identify the zero-length-final-packet case.
			 */
			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					(urb->transfer_flags
							& URB_ZERO_PACKET)
						== URB_ZERO_PACKET,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				hw_ep->tx_channel = NULL;
				dma_channel = NULL;

				/* REVISIT there's an error path here that
				 * needs handling:  can't do dma, but
				 * there's no pio buffer address...
				 */
			}
		}

		if (load_count) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_AUTOSET);
			/* write CSR */
			csr |= MUSB_TXCSR_MODE;

			if (epnum)
				musb_writew(epio, MUSB_TXCSR, csr);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
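
/*
 * Control transfers walk the usual SETUP/DATA/STATUS stages; the
 * musb->ep0_stage field tracks them, and musb_h_ep0_continue() plus
 * musb_h_ep0_irq() below move through them roughly like this:
 *
 *	MUSB_EP0_START --(IN data)---> MUSB_EP0_IN -----\
 *	      |\-------(OUT data)---> MUSB_EP0_OUT -----+--> MUSB_EP0_STATUS
 *	       \-------(no data stage)------------------/
 */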

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			csr |= MUSB_CSR0_FLUSHFIFO;
			musb_writew(epio, MUSB_CSR0, csr);
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
		musb_writew(epio, MUSB_CSR0, 0);

		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
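
/*
 * A minimal sketch of the mode-0/mode-1 rule described above
 * (hypothetical helper, not used by this driver): after a TX DMA
 * completion, TXPKTRDY must still be set by hand in mode 0, and in
 * mode 1 only for a short trailing packet.
 */
static inline bool example_tx_needs_txpktrdy(unsigned dma_mode,
		size_t transferred, u16 maxpacket)
{
	if (dma_mode == 0)
		return true;				/* always manual */
	return (transferred % maxpacket) != 0;		/* short packet */
}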
", dma" : ""); 1178 1179 /* check for errors */ 1180 if (tx_csr & MUSB_TXCSR_H_RXSTALL) { 1181 /* dma was disabled, fifo flushed */ 1182 DBG(3, "TX end %d stall\n", epnum); 1183 1184 /* stall; record URB status */ 1185 status = -EPIPE; 1186 1187 } else if (tx_csr & MUSB_TXCSR_H_ERROR) { 1188 /* (NON-ISO) dma was disabled, fifo flushed */ 1189 DBG(3, "TX 3strikes on ep=%d\n", epnum); 1190 1191 status = -ETIMEDOUT; 1192 1193 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { 1194 DBG(6, "TX end=%d device not responding\n", epnum); 1195 1196 /* NOTE: this code path would be a good place to PAUSE a 1197 * transfer, if there's some other (nonperiodic) tx urb 1198 * that could use this fifo. (dma complicates it...) 1199 * 1200 * if (bulk && qh->ring.next != &musb->out_bulk), then 1201 * we have a candidate... NAKing is *NOT* an error 1202 */ 1203 musb_ep_select(mbase, epnum); 1204 musb_writew(epio, MUSB_TXCSR, 1205 MUSB_TXCSR_H_WZC_BITS 1206 | MUSB_TXCSR_TXPKTRDY); 1207 goto finish; 1208 } 1209 1210 if (status) { 1211 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1212 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1213 (void) musb->dma_controller->channel_abort(dma); 1214 } 1215 1216 /* do the proper sequence to abort the transfer in the 1217 * usb core; the dma engine should already be stopped. 1218 */ 1219 musb_h_tx_flush_fifo(hw_ep); 1220 tx_csr &= ~(MUSB_TXCSR_AUTOSET 1221 | MUSB_TXCSR_DMAENAB 1222 | MUSB_TXCSR_H_ERROR 1223 | MUSB_TXCSR_H_RXSTALL 1224 | MUSB_TXCSR_H_NAKTIMEOUT 1225 ); 1226 1227 musb_ep_select(mbase, epnum); 1228 musb_writew(epio, MUSB_TXCSR, tx_csr); 1229 /* REVISIT may need to clear FLUSHFIFO ... */ 1230 musb_writew(epio, MUSB_TXCSR, tx_csr); 1231 musb_writeb(epio, MUSB_TXINTERVAL, 0); 1232 1233 done = true; 1234 } 1235 1236 /* second cppi case */ 1237 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1238 DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); 1239 goto finish; 1240 1241 } 1242 1243 /* REVISIT this looks wrong... 
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			wLength = dma->actual_len;
		else
			wLength = qh->segsize;
		qh->offset += wLength;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = qh->segsize;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				buf = urb->transfer_buffer + d->offset;
				wLength = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				buf = urb->transfer_buffer
						+ qh->offset;
				wLength = urb->transfer_buffer_length
						- qh->offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);

	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
		/* WARN_ON(!buf); */

		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
		 * (and presumably, fifo is not half-full) we should write TWO
		 * packets before updating TXCSR ... other docs disagree ...
		 */
		/* PIO:  start next packet in this URB */
		if (wLength > qh->maxpacket)
			wLength = qh->maxpacket;
		musb_write_fifo(hw_ep, wLength, buf);
		qh->segsize = wLength;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
	} else
		DBG(1, "not complete, but dma enabled?\n");

finish:
	return;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 * All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 * thus be a great candidate for using mode 1 ... for all but the
 * last packet of one URB's transfer.
 */

#endif
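
/*
 * A minimal sketch of termination rule (b) above (hypothetical helper,
 * for illustration only): at this driver level an IN transfer ends on
 * a short packet or when the buffer is full.
 */
static inline bool example_rx_transfer_done(size_t actual, size_t buf_len,
		u16 last_packet_len, u16 maxpacket)
{
	return actual >= buf_len || last_packet_len < maxpacket;
}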

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			/* NOTE this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) rx urb
			 * that could use this fifo.  (dma complicates it...)
			 *
			 * if (bulk && qh->ring.next != &musb->in_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			DBG(6, "RX end %d NAK timeout\n", epnum);
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS
					| MUSB_RXCSR_H_REQPKT);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else  {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
"off" : "reset", 1537 musb_readw(epio, MUSB_RXCSR), 1538 musb_readw(epio, MUSB_RXCOUNT)); 1539#else 1540 done = true; 1541#endif 1542 } else if (urb->status == -EINPROGRESS) { 1543 /* if no errors, be sure a packet is ready for unloading */ 1544 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { 1545 status = -EPROTO; 1546 ERR("Rx interrupt with no errors or packet!\n"); 1547 1548 /* FIXME this is another "SHOULD NEVER HAPPEN" */ 1549 1550/* SCRUB (RX) */ 1551 /* do the proper sequence to abort the transfer */ 1552 musb_ep_select(mbase, epnum); 1553 val &= ~MUSB_RXCSR_H_REQPKT; 1554 musb_writew(epio, MUSB_RXCSR, val); 1555 goto finish; 1556 } 1557 1558 /* we are expecting IN packets */ 1559#ifdef CONFIG_USB_INVENTRA_DMA 1560 if (dma) { 1561 struct dma_controller *c; 1562 u16 rx_count; 1563 int ret, length; 1564 dma_addr_t buf; 1565 1566 rx_count = musb_readw(epio, MUSB_RXCOUNT); 1567 1568 DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", 1569 epnum, rx_count, 1570 urb->transfer_dma 1571 + urb->actual_length, 1572 qh->offset, 1573 urb->transfer_buffer_length); 1574 1575 c = musb->dma_controller; 1576 1577 if (usb_pipeisoc(pipe)) { 1578 int status = 0; 1579 struct usb_iso_packet_descriptor *d; 1580 1581 d = urb->iso_frame_desc + qh->iso_idx; 1582 1583 if (iso_err) { 1584 status = -EILSEQ; 1585 urb->error_count++; 1586 } 1587 if (rx_count > d->length) { 1588 if (status == 0) { 1589 status = -EOVERFLOW; 1590 urb->error_count++; 1591 } 1592 DBG(2, "** OVERFLOW %d into %d\n",\ 1593 rx_count, d->length); 1594 1595 length = d->length; 1596 } else 1597 length = rx_count; 1598 d->status = status; 1599 buf = urb->transfer_dma + d->offset; 1600 } else { 1601 length = rx_count; 1602 buf = urb->transfer_dma + 1603 urb->actual_length; 1604 } 1605 1606 dma->desired_mode = 0; 1607#ifdef USE_MODE1 1608 /* because of the issue below, mode 1 will 1609 * only rarely behave with correct semantics. 1610 */ 1611 if ((urb->transfer_flags & 1612 URB_SHORT_NOT_OK) 1613 && (urb->transfer_buffer_length - 1614 urb->actual_length) 1615 > qh->maxpacket) 1616 dma->desired_mode = 1; 1617 if (rx_count < hw_ep->max_packet_sz_rx) { 1618 length = rx_count; 1619 dma->bDesiredMode = 0; 1620 } else { 1621 length = urb->transfer_buffer_length; 1622 } 1623#endif 1624 1625/* Disadvantage of using mode 1: 1626 * It's basically usable only for mass storage class; essentially all 1627 * other protocols also terminate transfers on short packets. 1628 * 1629 * Details: 1630 * An extra IN token is sent at the end of the transfer (due to AUTOREQ) 1631 * If you try to use mode 1 for (transfer_buffer_length - 512), and try 1632 * to use the extra IN token to grab the last packet using mode 0, then 1633 * the problem is that you cannot be sure when the device will send the 1634 * last packet and RxPktRdy set. Sometimes the packet is recd too soon 1635 * such that it gets lost when RxCSR is re-set at the end of the mode 1 1636 * transfer, while sometimes it is recd just a little late so that if you 1637 * try to configure for mode 0 soon after the mode 1 transfer is 1638 * completed, you will find rxcount 0. Okay, so you might think why not 1639 * wait for an interrupt when the pkt is recd. Well, you won't get any! 

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			DBG(6, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
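	/* Best-fit search: pick the idle endpoint whose FIFO exceeds
	 * qh->maxpacket by the smallest margin, leaving larger FIFOs
	 * free for endpoints that actually need them.
	 */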
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (is_in || hw_ep->is_shared_fifo) {
			if (hw_ep->in_qh  != NULL)
				continue;
		} else	if (hw_ep->out_qh != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
		else
			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;

		if (diff >= 0 && best_diff > diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}

static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh = hep->hcpriv;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	spin_unlock_irqrestore(&musb->lock, flags);
	if (ret)
		return ret;

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list ... so there's little to do unless hep wasn't
	 * yet scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh) {
		urb->hcpriv = qh;
		return 0;
	}

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);

	/* no high bandwidth support yet */
	if (qh->maxpacket & ~0x7ff) {
		ret = -EMSGSIZE;
		goto done;
	}

	qh->epnum = usb_endpoint_num(epd);
	qh->type = usb_endpoint_type(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  Or for reads, one that's
		 * perfectly normal:  network and other drivers keep reads
		 * posted at all times, having one pending for a week should
		 * be perfectly safe.
		 *
		 * The upside of disabling it is that we avoid writing any
		 * transfer scheduling code for now, putting that issue
		 * aside for a while.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
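
/*
 * A minimal sketch of the two interval encodings set up above
 * (hypothetical helper, not used here): full/low-speed interrupt
 * endpoints use bInterval directly as a frame count, while high-speed
 * and ISO endpoints use the logarithmic form, 2^(bInterval-1)
 * (micro)frames.
 */
static inline unsigned example_interval_to_frames(u8 bInterval, bool log_enc)
{
	if (log_enc)
		return 1U << (clamp_t(u8, bInterval, 1, 16) - 1);
	return max_t(u8, bInterval, 1);
}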
1935 */ 1936 } 1937 spin_unlock_irqrestore(&musb->lock, flags); 1938 1939done: 1940 if (ret != 0) { 1941 spin_lock_irqsave(&musb->lock, flags); 1942 usb_hcd_unlink_urb_from_ep(hcd, urb); 1943 spin_unlock_irqrestore(&musb->lock, flags); 1944 kfree(qh); 1945 } 1946 return ret; 1947} 1948 1949 1950/* 1951 * abort a transfer that's at the head of a hardware queue. 1952 * called with controller locked, irqs blocked 1953 * that hardware queue advances to the next transfer, unless prevented 1954 */ 1955static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) 1956{ 1957 struct musb_hw_ep *ep = qh->hw_ep; 1958 void __iomem *epio = ep->regs; 1959 unsigned hw_end = ep->epnum; 1960 void __iomem *regs = ep->musb->mregs; 1961 u16 csr; 1962 int status = 0; 1963 1964 musb_ep_select(regs, hw_end); 1965 1966 if (is_dma_capable()) { 1967 struct dma_channel *dma; 1968 1969 dma = is_in ? ep->rx_channel : ep->tx_channel; 1970 if (dma) { 1971 status = ep->musb->dma_controller->channel_abort(dma); 1972 DBG(status ? 1 : 3, 1973 "abort %cX%d DMA for urb %p --> %d\n", 1974 is_in ? 'R' : 'T', ep->epnum, 1975 urb, status); 1976 urb->actual_length += dma->actual_len; 1977 } 1978 } 1979 1980 /* turn off DMA requests, discard state, stop polling ... */ 1981 if (is_in) { 1982 /* giveback saves bulk toggle */ 1983 csr = musb_h_flush_rxfifo(ep, 0); 1984 1985 /* REVISIT we still get an irq; should likely clear the 1986 * endpoint's irq status here to avoid bogus irqs. 1987 * clearing that status is platform-specific... 1988 */ 1989 } else { 1990 musb_h_tx_flush_fifo(ep); 1991 csr = musb_readw(epio, MUSB_TXCSR); 1992 csr &= ~(MUSB_TXCSR_AUTOSET 1993 | MUSB_TXCSR_DMAENAB 1994 | MUSB_TXCSR_H_RXSTALL 1995 | MUSB_TXCSR_H_NAKTIMEOUT 1996 | MUSB_TXCSR_H_ERROR 1997 | MUSB_TXCSR_TXPKTRDY); 1998 musb_writew(epio, MUSB_TXCSR, csr); 1999 /* REVISIT may need to clear FLUSHFIFO ... */ 2000 musb_writew(epio, MUSB_TXCSR, csr); 2001 /* flush cpu writebuffer */ 2002 csr = musb_readw(epio, MUSB_TXCSR); 2003 } 2004 if (status == 0) 2005 musb_advance_schedule(ep->musb, urb, ep, is_in); 2006 return status; 2007} 2008 2009static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) 2010{ 2011 struct musb *musb = hcd_to_musb(hcd); 2012 struct musb_qh *qh; 2013 struct list_head *sched; 2014 unsigned long flags; 2015 int ret; 2016 2017 DBG(4, "urb=%p, dev%d ep%d%s\n", urb, 2018 usb_pipedevice(urb->pipe), 2019 usb_pipeendpoint(urb->pipe), 2020 usb_pipein(urb->pipe) ? "in" : "out"); 2021 2022 spin_lock_irqsave(&musb->lock, flags); 2023 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 2024 if (ret) 2025 goto done; 2026 2027 qh = urb->hcpriv; 2028 if (!qh) 2029 goto done; 2030 2031 /* Any URB not actively programmed into endpoint hardware can be 2032 * immediately given back; that's any URB not at the head of an 2033 * endpoint queue, unless someday we get real DMA queues. And even 2034 * if it's at the head, it might not be known to the hardware... 2035 * 2036 * Otherwise abort current transfer, pending dma, etc.; urb->status 2037 * has already been updated. This is a synchronous abort; it'd be 2038 * OK to hold off until after some IRQ, though. 
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct list_head	*sched;
	unsigned long		flags;
	int			ret;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/* Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 */
	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
		ret = -EINPROGRESS;
	else {
		switch (qh->type) {
		case USB_ENDPOINT_XFER_CONTROL:
			sched = &musb->control;
			break;
		case USB_ENDPOINT_XFER_BULK:
			if (qh->mux == 1) {
				if (usb_pipein(urb->pipe))
					sched = &musb->in_bulk;
				else
					sched = &musb->out_bulk;
				break;
			}
			/* FALLTHROUGH: non-multiplexed bulk owns its
			 * hardware endpoint, like periodic transfers
			 */
		default:
			/* REVISIT when we get a schedule tree, periodic
			 * transfers won't always be at the head of a
			 * singleton queue...
			 */
			sched = NULL;
			break;
		}
	}

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
	if (ret < 0 || (sched && qh != first_qh(sched))) {
		int	ready = qh->is_ready;

		ret = 0;
		qh->is_ready = 0;
		__musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
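/*
 * Editorial aside, not in the original source: the head-of-queue test in
 * musb_urb_dequeue() above relies on the circular list_head layout -- an
 * entry whose ->prev points back at the list head is the first entry.
 * An equivalent, more explicit spelling (hypothetical helper, shown only
 * for illustration) would be:
 */
static inline int musb_urb_is_queue_head(struct urb *urb, struct musb_qh *qh)
{
	return urb == list_first_entry(&qh->hep->urb_list,
					struct urb, urb_list);
}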
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			epnum = hep->desc.bEndpointAddress;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	u8			is_in = epnum & USB_DIR_IN;
	struct musb_qh		*qh;
	struct urb		*urb;
	struct list_head	*sched;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		sched = &musb->control;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (qh->mux == 1) {
			if (is_in)
				sched = &musb->in_bulk;
			else
				sched = &musb->out_bulk;
			break;
		}
		/* FALLTHROUGH */
	default:
		/* REVISIT when we get a schedule tree, periodic transfers
		 * won't always be at the head of a singleton queue...
		 */
		sched = NULL;
		break;
	}

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* kick first urb off the hardware, if needed */
	qh->is_ready = 0;
	if (!sched || qh == first_qh(sched)) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
		return 0;

	if (is_host_active(musb) && musb->is_active) {
		WARNING("trying to suspend as %s is_active=%i\n",
			otg_state_string(musb), musb->is_active);
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};
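/*
 * Editorial sketch, not part of this file: musb_hc_driver is consumed by
 * the core driver, which embeds the HCD state via hcd_priv_size above.
 * A conventional (hypothetical, simplified) binding through the generic
 * HCD API would look like:
 *
 *	struct usb_hcd *hcd;
 *
 *	hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
 *	if (!hcd)
 *		return -ENOMEM;
 *	ret = usb_add_hcd(hcd, 0, 0);
 *
 * In this driver the actual registration lives in musb_core.c, which
 * dispatches the shared interrupt itself so host and peripheral sides
 * can coexist in OTG configurations (hence the missing .irq and .reset
 * hooks above).
 */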