musb_host.c revision 2cc65feab2f18dfa4297209829ce228989c7356b
1/* 2 * MUSB OTG driver host support 3 * 4 * Copyright 2005 Mentor Graphics Corporation 5 * Copyright (C) 2005-2006 by Texas Instruments 6 * Copyright (C) 2006-2007 Nokia Corporation 7 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * version 2 as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 21 * 02110-1301 USA 22 * 23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 */ 35 36#include <linux/module.h> 37#include <linux/kernel.h> 38#include <linux/delay.h> 39#include <linux/sched.h> 40#include <linux/slab.h> 41#include <linux/errno.h> 42#include <linux/init.h> 43#include <linux/list.h> 44#include <linux/dma-mapping.h> 45 46#include "musb_core.h" 47#include "musb_host.h" 48 49/* MUSB HOST status 22-mar-2006 50 * 51 * - There's still lots of partial code duplication for fault paths, so 52 * they aren't handled as consistently as they need to be. 53 * 54 * - PIO mostly behaved when last tested. 55 * + including ep0, with all usbtest cases 9, 10 56 * + usbtest 14 (ep0out) doesn't seem to run at all 57 * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest 58 * configurations, but otherwise double buffering passes basic tests. 59 * + for 2.6.N, for N > ~10, needs API changes for hcd framework. 60 * 61 * - DMA (CPPI) ... partially behaves, not currently recommended 62 * + about 1/15 the speed of typical EHCI implementations (PCI) 63 * + RX, all too often reqpkt seems to misbehave after tx 64 * + TX, no known issues (other than evident silicon issue) 65 * 66 * - DMA (Mentor/OMAP) ...has at least toggle update problems 67 * 68 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet 69 * starvation ... nothing yet for TX, interrupt, or bulk. 70 * 71 * - Not tested with HNP, but some SRP paths seem to behave. 72 * 73 * NOTE 24-August-2006: 74 * 75 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an 76 * extra endpoint for periodic use enabling hub + keybd + mouse. That 77 * mostly works, except that with "usbnet" it's easy to trigger cases 78 * with "ping" where RX loses. (a) ping to davinci, even "ping -f", 79 * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses 80 * although ARP RX wins. (That test was done with a full speed link.) 81 */ 82 83 84/* 85 * NOTE on endpoint usage: 86 * 87 * CONTROL transfers all go through ep0. BULK ones go through dedicated IN 88 * and OUT endpoints ... hardware is dedicated for those "async" queue(s). 89 * (Yes, bulk _could_ use more of the endpoints than that, and would even 90 * benefit from it.) 91 * 92 * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints. 93 * So far that scheduling is both dumb and optimistic: the endpoint will be 94 * "claimed" until its software queue is no longer refilled. No multiplexing 95 * of transfers between endpoints, or anything clever. 96 */ 97 98struct musb *hcd_to_musb(struct usb_hcd *hcd) 99{ 100 return *(struct musb **) hcd->hcd_priv; 101} 102 103 104static void musb_ep_program(struct musb *musb, u8 epnum, 105 struct urb *urb, int is_out, 106 u8 *buf, u32 offset, u32 len); 107 108/* 109 * Clear TX fifo. Needed to avoid BABBLE errors. 110 */ 111static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) 112{ 113 struct musb *musb = ep->musb; 114 void __iomem *epio = ep->regs; 115 u16 csr; 116 u16 lastcsr = 0; 117 int retries = 1000; 118 119 csr = musb_readw(epio, MUSB_TXCSR); 120 while (csr & MUSB_TXCSR_FIFONOTEMPTY) { 121 if (csr != lastcsr) 122 dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); 123 lastcsr = csr; 124 csr |= MUSB_TXCSR_FLUSHFIFO; 125 musb_writew(epio, MUSB_TXCSR, csr); 126 csr = musb_readw(epio, MUSB_TXCSR); 127 if (WARN(retries-- < 1, 128 "Could not flush host TX%d fifo: csr: %04x\n", 129 ep->epnum, csr)) 130 return; 131 mdelay(1); 132 } 133} 134 135static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) 136{ 137 void __iomem *epio = ep->regs; 138 u16 csr; 139 int retries = 5; 140 141 /* scrub any data left in the fifo */ 142 do { 143 csr = musb_readw(epio, MUSB_TXCSR); 144 if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY))) 145 break; 146 musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO); 147 csr = musb_readw(epio, MUSB_TXCSR); 148 udelay(10); 149 } while (--retries); 150 151 WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n", 152 ep->epnum, csr); 153 154 /* and reset for the next transfer */ 155 musb_writew(epio, MUSB_TXCSR, 0); 156} 157 158/* 159 * Start transmit. Caller is responsible for locking shared resources. 160 * musb must be locked. 161 */ 162static inline void musb_h_tx_start(struct musb_hw_ep *ep) 163{ 164 u16 txcsr; 165 166 /* NOTE: no locks here; caller should lock and select EP */ 167 if (ep->epnum) { 168 txcsr = musb_readw(ep->regs, MUSB_TXCSR); 169 txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; 170 musb_writew(ep->regs, MUSB_TXCSR, txcsr); 171 } else { 172 txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; 173 musb_writew(ep->regs, MUSB_CSR0, txcsr); 174 } 175 176} 177 178static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) 179{ 180 u16 txcsr; 181 182 /* NOTE: no locks here; caller should lock and select EP */ 183 txcsr = musb_readw(ep->regs, MUSB_TXCSR); 184 txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; 185 if (is_cppi_enabled()) 186 txcsr |= MUSB_TXCSR_DMAMODE; 187 musb_writew(ep->regs, MUSB_TXCSR, txcsr); 188} 189 190static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh) 191{ 192 if (is_in != 0 || ep->is_shared_fifo) 193 ep->in_qh = qh; 194 if (is_in == 0 || ep->is_shared_fifo) 195 ep->out_qh = qh; 196} 197 198static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in) 199{ 200 return is_in ? ep->in_qh : ep->out_qh; 201} 202 203/* 204 * Start the URB at the front of an endpoint's queue 205 * end must be claimed from the caller. 206 * 207 * Context: controller locked, irqs blocked 208 */ 209static void 210musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) 211{ 212 u16 frame; 213 u32 len; 214 void __iomem *mbase = musb->mregs; 215 struct urb *urb = next_urb(qh); 216 void *buf = urb->transfer_buffer; 217 u32 offset = 0; 218 struct musb_hw_ep *hw_ep = qh->hw_ep; 219 unsigned pipe = urb->pipe; 220 u8 address = usb_pipedevice(pipe); 221 int epnum = hw_ep->epnum; 222 223 /* initialize software qh state */ 224 qh->offset = 0; 225 qh->segsize = 0; 226 227 /* gather right source of data */ 228 switch (qh->type) { 229 case USB_ENDPOINT_XFER_CONTROL: 230 /* control transfers always start with SETUP */ 231 is_in = 0; 232 musb->ep0_stage = MUSB_EP0_START; 233 buf = urb->setup_packet; 234 len = 8; 235 break; 236 case USB_ENDPOINT_XFER_ISOC: 237 qh->iso_idx = 0; 238 qh->frame = 0; 239 offset = urb->iso_frame_desc[0].offset; 240 len = urb->iso_frame_desc[0].length; 241 break; 242 default: /* bulk, interrupt */ 243 /* actual_length may be nonzero on retry paths */ 244 buf = urb->transfer_buffer + urb->actual_length; 245 len = urb->transfer_buffer_length - urb->actual_length; 246 } 247 248 dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", 249 qh, urb, address, qh->epnum, 250 is_in ? "in" : "out", 251 ({char *s; switch (qh->type) { 252 case USB_ENDPOINT_XFER_CONTROL: s = ""; break; 253 case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; 254 case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; 255 default: s = "-intr"; break; 256 }; s; }), 257 epnum, buf + offset, len); 258 259 /* Configure endpoint */ 260 musb_ep_set_qh(hw_ep, is_in, qh); 261 musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len); 262 263 /* transmit may have more work: start it when it is time */ 264 if (is_in) 265 return; 266 267 /* determine if the time is right for a periodic transfer */ 268 switch (qh->type) { 269 case USB_ENDPOINT_XFER_ISOC: 270 case USB_ENDPOINT_XFER_INT: 271 dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n"); 272 frame = musb_readw(mbase, MUSB_FRAME); 273 /* FIXME this doesn't implement that scheduling policy ... 274 * or handle framecounter wrapping 275 */ 276 if ((urb->transfer_flags & URB_ISO_ASAP) 277 || (frame >= urb->start_frame)) { 278 /* REVISIT the SOF irq handler shouldn't duplicate 279 * this code; and we don't init urb->start_frame... 280 */ 281 qh->frame = 0; 282 goto start; 283 } else { 284 qh->frame = urb->start_frame; 285 /* enable SOF interrupt so we can count down */ 286 dev_dbg(musb->controller, "SOF for %d\n", epnum); 287#if 1 /* ifndef CONFIG_ARCH_DAVINCI */ 288 musb_writeb(mbase, MUSB_INTRUSBE, 0xff); 289#endif 290 } 291 break; 292 default: 293start: 294 dev_dbg(musb->controller, "Start TX%d %s\n", epnum, 295 hw_ep->tx_channel ? "dma" : "pio"); 296 297 if (!hw_ep->tx_channel) 298 musb_h_tx_start(hw_ep); 299 else if (is_cppi_enabled() || tusb_dma_omap()) 300 musb_h_tx_dma_start(hw_ep); 301 } 302} 303 304/* Context: caller owns controller lock, IRQs are blocked */ 305static void musb_giveback(struct musb *musb, struct urb *urb, int status) 306__releases(musb->lock) 307__acquires(musb->lock) 308{ 309 dev_dbg(musb->controller, 310 "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", 311 urb, urb->complete, status, 312 usb_pipedevice(urb->pipe), 313 usb_pipeendpoint(urb->pipe), 314 usb_pipein(urb->pipe) ? "in" : "out", 315 urb->actual_length, urb->transfer_buffer_length 316 ); 317 318 usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); 319 spin_unlock(&musb->lock); 320 usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); 321 spin_lock(&musb->lock); 322} 323 324/* For bulk/interrupt endpoints only */ 325static inline void musb_save_toggle(struct musb_qh *qh, int is_in, 326 struct urb *urb) 327{ 328 void __iomem *epio = qh->hw_ep->regs; 329 u16 csr; 330 331 /* 332 * FIXME: the current Mentor DMA code seems to have 333 * problems getting toggle correct. 334 */ 335 336 if (is_in) 337 csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE; 338 else 339 csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE; 340 341 usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0); 342} 343 344/* 345 * Advance this hardware endpoint's queue, completing the specified URB and 346 * advancing to either the next URB queued to that qh, or else invalidating 347 * that qh and advancing to the next qh scheduled after the current one. 348 * 349 * Context: caller owns controller lock, IRQs are blocked 350 */ 351static void musb_advance_schedule(struct musb *musb, struct urb *urb, 352 struct musb_hw_ep *hw_ep, int is_in) 353{ 354 struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in); 355 struct musb_hw_ep *ep = qh->hw_ep; 356 int ready = qh->is_ready; 357 int status; 358 359 status = (urb->status == -EINPROGRESS) ? 0 : urb->status; 360 361 /* save toggle eagerly, for paranoia */ 362 switch (qh->type) { 363 case USB_ENDPOINT_XFER_BULK: 364 case USB_ENDPOINT_XFER_INT: 365 musb_save_toggle(qh, is_in, urb); 366 break; 367 case USB_ENDPOINT_XFER_ISOC: 368 if (status == 0 && urb->error_count) 369 status = -EXDEV; 370 break; 371 } 372 373 qh->is_ready = 0; 374 musb_giveback(musb, urb, status); 375 qh->is_ready = ready; 376 377 /* reclaim resources (and bandwidth) ASAP; deschedule it, and 378 * invalidate qh as soon as list_empty(&hep->urb_list) 379 */ 380 if (list_empty(&qh->hep->urb_list)) { 381 struct list_head *head; 382 struct dma_controller *dma = musb->dma_controller; 383 384 if (is_in) { 385 ep->rx_reinit = 1; 386 if (ep->rx_channel) { 387 dma->channel_release(ep->rx_channel); 388 ep->rx_channel = NULL; 389 } 390 } else { 391 ep->tx_reinit = 1; 392 if (ep->tx_channel) { 393 dma->channel_release(ep->tx_channel); 394 ep->tx_channel = NULL; 395 } 396 } 397 398 /* Clobber old pointers to this qh */ 399 musb_ep_set_qh(ep, is_in, NULL); 400 qh->hep->hcpriv = NULL; 401 402 switch (qh->type) { 403 404 case USB_ENDPOINT_XFER_CONTROL: 405 case USB_ENDPOINT_XFER_BULK: 406 /* fifo policy for these lists, except that NAKing 407 * should rotate a qh to the end (for fairness). 408 */ 409 if (qh->mux == 1) { 410 head = qh->ring.prev; 411 list_del(&qh->ring); 412 kfree(qh); 413 qh = first_qh(head); 414 break; 415 } 416 417 case USB_ENDPOINT_XFER_ISOC: 418 case USB_ENDPOINT_XFER_INT: 419 /* this is where periodic bandwidth should be 420 * de-allocated if it's tracked and allocated; 421 * and where we'd update the schedule tree... 422 */ 423 kfree(qh); 424 qh = NULL; 425 break; 426 } 427 } 428 429 if (qh != NULL && qh->is_ready) { 430 dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", 431 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); 432 musb_start_urb(musb, is_in, qh); 433 } 434} 435 436static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) 437{ 438 /* we don't want fifo to fill itself again; 439 * ignore dma (various models), 440 * leave toggle alone (may not have been saved yet) 441 */ 442 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; 443 csr &= ~(MUSB_RXCSR_H_REQPKT 444 | MUSB_RXCSR_H_AUTOREQ 445 | MUSB_RXCSR_AUTOCLEAR); 446 447 /* write 2x to allow double buffering */ 448 musb_writew(hw_ep->regs, MUSB_RXCSR, csr); 449 musb_writew(hw_ep->regs, MUSB_RXCSR, csr); 450 451 /* flush writebuffer */ 452 return musb_readw(hw_ep->regs, MUSB_RXCSR); 453} 454 455/* 456 * PIO RX for a packet (or part of it). 457 */ 458static bool 459musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) 460{ 461 u16 rx_count; 462 u8 *buf; 463 u16 csr; 464 bool done = false; 465 u32 length; 466 int do_flush = 0; 467 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 468 void __iomem *epio = hw_ep->regs; 469 struct musb_qh *qh = hw_ep->in_qh; 470 int pipe = urb->pipe; 471 void *buffer = urb->transfer_buffer; 472 473 /* musb_ep_select(mbase, epnum); */ 474 rx_count = musb_readw(epio, MUSB_RXCOUNT); 475 dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, 476 urb->transfer_buffer, qh->offset, 477 urb->transfer_buffer_length); 478 479 /* unload FIFO */ 480 if (usb_pipeisoc(pipe)) { 481 int status = 0; 482 struct usb_iso_packet_descriptor *d; 483 484 if (iso_err) { 485 status = -EILSEQ; 486 urb->error_count++; 487 } 488 489 d = urb->iso_frame_desc + qh->iso_idx; 490 buf = buffer + d->offset; 491 length = d->length; 492 if (rx_count > length) { 493 if (status == 0) { 494 status = -EOVERFLOW; 495 urb->error_count++; 496 } 497 dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); 498 do_flush = 1; 499 } else 500 length = rx_count; 501 urb->actual_length += length; 502 d->actual_length = length; 503 504 d->status = status; 505 506 /* see if we are done */ 507 done = (++qh->iso_idx >= urb->number_of_packets); 508 } else { 509 /* non-isoch */ 510 buf = buffer + qh->offset; 511 length = urb->transfer_buffer_length - qh->offset; 512 if (rx_count > length) { 513 if (urb->status == -EINPROGRESS) 514 urb->status = -EOVERFLOW; 515 dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); 516 do_flush = 1; 517 } else 518 length = rx_count; 519 urb->actual_length += length; 520 qh->offset += length; 521 522 /* see if we are done */ 523 done = (urb->actual_length == urb->transfer_buffer_length) 524 || (rx_count < qh->maxpacket) 525 || (urb->status != -EINPROGRESS); 526 if (done 527 && (urb->status == -EINPROGRESS) 528 && (urb->transfer_flags & URB_SHORT_NOT_OK) 529 && (urb->actual_length 530 < urb->transfer_buffer_length)) 531 urb->status = -EREMOTEIO; 532 } 533 534 musb_read_fifo(hw_ep, length, buf); 535 536 csr = musb_readw(epio, MUSB_RXCSR); 537 csr |= MUSB_RXCSR_H_WZC_BITS; 538 if (unlikely(do_flush)) 539 musb_h_flush_rxfifo(hw_ep, csr); 540 else { 541 /* REVISIT this assumes AUTOCLEAR is never set */ 542 csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); 543 if (!done) 544 csr |= MUSB_RXCSR_H_REQPKT; 545 musb_writew(epio, MUSB_RXCSR, csr); 546 } 547 548 return done; 549} 550 551/* we don't always need to reinit a given side of an endpoint... 552 * when we do, use tx/rx reinit routine and then construct a new CSR 553 * to address data toggle, NYET, and DMA or PIO. 554 * 555 * it's possible that driver bugs (especially for DMA) or aborting a 556 * transfer might have left the endpoint busier than it should be. 557 * the busy/not-empty tests are basically paranoia. 558 */ 559static void 560musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) 561{ 562 u16 csr; 563 564 /* NOTE: we know the "rx" fifo reinit never triggers for ep0. 565 * That always uses tx_reinit since ep0 repurposes TX register 566 * offsets; the initial SETUP packet is also a kind of OUT. 567 */ 568 569 /* if programmed for Tx, put it in RX mode */ 570 if (ep->is_shared_fifo) { 571 csr = musb_readw(ep->regs, MUSB_TXCSR); 572 if (csr & MUSB_TXCSR_MODE) { 573 musb_h_tx_flush_fifo(ep); 574 csr = musb_readw(ep->regs, MUSB_TXCSR); 575 musb_writew(ep->regs, MUSB_TXCSR, 576 csr | MUSB_TXCSR_FRCDATATOG); 577 } 578 579 /* 580 * Clear the MODE bit (and everything else) to enable Rx. 581 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB. 582 */ 583 if (csr & MUSB_TXCSR_DMAMODE) 584 musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE); 585 musb_writew(ep->regs, MUSB_TXCSR, 0); 586 587 /* scrub all previous state, clearing toggle */ 588 } else { 589 csr = musb_readw(ep->regs, MUSB_RXCSR); 590 if (csr & MUSB_RXCSR_RXPKTRDY) 591 WARNING("rx%d, packet/%d ready?\n", ep->epnum, 592 musb_readw(ep->regs, MUSB_RXCOUNT)); 593 594 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); 595 } 596 597 /* target addr and (for multipoint) hub addr/port */ 598 if (musb->is_multipoint) { 599 musb_write_rxfunaddr(ep->target_regs, qh->addr_reg); 600 musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg); 601 musb_write_rxhubport(ep->target_regs, qh->h_port_reg); 602 603 } else 604 musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); 605 606 /* protocol/endpoint, interval/NAKlimit, i/o size */ 607 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); 608 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); 609 /* NOTE: bulk combining rewrites high bits of maxpacket */ 610 /* Set RXMAXP with the FIFO size of the endpoint 611 * to disable double buffer mode. 612 */ 613 if (musb->double_buffer_not_ok) 614 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); 615 else 616 musb_writew(ep->regs, MUSB_RXMAXP, 617 qh->maxpacket | ((qh->hb_mult - 1) << 11)); 618 619 ep->rx_reinit = 0; 620} 621 622static bool musb_tx_dma_program(struct dma_controller *dma, 623 struct musb_hw_ep *hw_ep, struct musb_qh *qh, 624 struct urb *urb, u32 offset, u32 length) 625{ 626 struct dma_channel *channel = hw_ep->tx_channel; 627 void __iomem *epio = hw_ep->regs; 628 u16 pkt_size = qh->maxpacket; 629 u16 csr; 630 u8 mode; 631 632#ifdef CONFIG_USB_INVENTRA_DMA 633 if (length > channel->max_len) 634 length = channel->max_len; 635 636 csr = musb_readw(epio, MUSB_TXCSR); 637 if (length > pkt_size) { 638 mode = 1; 639 csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB; 640 /* autoset shouldn't be set in high bandwidth */ 641 /* 642 * Enable Autoset according to table 643 * below 644 * bulk_split hb_mult Autoset_Enable 645 * 0 1 Yes(Normal) 646 * 0 >1 No(High BW ISO) 647 * 1 1 Yes(HS bulk) 648 * 1 >1 Yes(FS bulk) 649 */ 650 if (qh->hb_mult == 1 || (qh->hb_mult > 1 && 651 can_bulk_split(hw_ep->musb, qh->type))) 652 csr |= MUSB_TXCSR_AUTOSET; 653 } else { 654 mode = 0; 655 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); 656 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ 657 } 658 channel->desired_mode = mode; 659 musb_writew(epio, MUSB_TXCSR, csr); 660#else 661 if (!is_cppi_enabled() && !tusb_dma_omap()) 662 return false; 663 664 channel->actual_len = 0; 665 666 /* 667 * TX uses "RNDIS" mode automatically but needs help 668 * to identify the zero-length-final-packet case. 669 */ 670 mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; 671#endif 672 673 qh->segsize = length; 674 675 /* 676 * Ensure the data reaches to main memory before starting 677 * DMA transfer 678 */ 679 wmb(); 680 681 if (!dma->channel_program(channel, pkt_size, mode, 682 urb->transfer_dma + offset, length)) { 683 dma->channel_release(channel); 684 hw_ep->tx_channel = NULL; 685 686 csr = musb_readw(epio, MUSB_TXCSR); 687 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); 688 musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS); 689 return false; 690 } 691 return true; 692} 693 694/* 695 * Program an HDRC endpoint as per the given URB 696 * Context: irqs blocked, controller lock held 697 */ 698static void musb_ep_program(struct musb *musb, u8 epnum, 699 struct urb *urb, int is_out, 700 u8 *buf, u32 offset, u32 len) 701{ 702 struct dma_controller *dma_controller; 703 struct dma_channel *dma_channel; 704 u8 dma_ok; 705 void __iomem *mbase = musb->mregs; 706 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 707 void __iomem *epio = hw_ep->regs; 708 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out); 709 u16 packet_sz = qh->maxpacket; 710 u8 use_dma = 1; 711 u16 csr; 712 713 dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s " 714 "h_addr%02x h_port%02x bytes %d\n", 715 is_out ? "-->" : "<--", 716 epnum, urb, urb->dev->speed, 717 qh->addr_reg, qh->epnum, is_out ? "out" : "in", 718 qh->h_addr_reg, qh->h_port_reg, 719 len); 720 721 musb_ep_select(mbase, epnum); 722 723 if (is_out && !len) { 724 use_dma = 0; 725 csr = musb_readw(epio, MUSB_TXCSR); 726 csr &= ~MUSB_TXCSR_DMAENAB; 727 musb_writew(epio, MUSB_TXCSR, csr); 728 hw_ep->tx_channel = NULL; 729 } 730 731 /* candidate for DMA? */ 732 dma_controller = musb->dma_controller; 733 if (use_dma && is_dma_capable() && epnum && dma_controller) { 734 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; 735 if (!dma_channel) { 736 dma_channel = dma_controller->channel_alloc( 737 dma_controller, hw_ep, is_out); 738 if (is_out) 739 hw_ep->tx_channel = dma_channel; 740 else 741 hw_ep->rx_channel = dma_channel; 742 } 743 } else 744 dma_channel = NULL; 745 746 /* make sure we clear DMAEnab, autoSet bits from previous run */ 747 748 /* OUT/transmit/EP0 or IN/receive? */ 749 if (is_out) { 750 u16 csr; 751 u16 int_txe; 752 u16 load_count; 753 754 csr = musb_readw(epio, MUSB_TXCSR); 755 756 /* disable interrupt in case we flush */ 757 int_txe = musb->intrtxe; 758 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); 759 760 /* general endpoint setup */ 761 if (epnum) { 762 /* flush all old state, set default */ 763 /* 764 * We could be flushing valid 765 * packets in double buffering 766 * case 767 */ 768 if (!hw_ep->tx_double_buffered) 769 musb_h_tx_flush_fifo(hw_ep); 770 771 /* 772 * We must not clear the DMAMODE bit before or in 773 * the same cycle with the DMAENAB bit, so we clear 774 * the latter first... 775 */ 776 csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT 777 | MUSB_TXCSR_AUTOSET 778 | MUSB_TXCSR_DMAENAB 779 | MUSB_TXCSR_FRCDATATOG 780 | MUSB_TXCSR_H_RXSTALL 781 | MUSB_TXCSR_H_ERROR 782 | MUSB_TXCSR_TXPKTRDY 783 ); 784 csr |= MUSB_TXCSR_MODE; 785 786 if (!hw_ep->tx_double_buffered) { 787 if (usb_gettoggle(urb->dev, qh->epnum, 1)) 788 csr |= MUSB_TXCSR_H_WR_DATATOGGLE 789 | MUSB_TXCSR_H_DATATOGGLE; 790 else 791 csr |= MUSB_TXCSR_CLRDATATOG; 792 } 793 794 musb_writew(epio, MUSB_TXCSR, csr); 795 /* REVISIT may need to clear FLUSHFIFO ... */ 796 csr &= ~MUSB_TXCSR_DMAMODE; 797 musb_writew(epio, MUSB_TXCSR, csr); 798 csr = musb_readw(epio, MUSB_TXCSR); 799 } else { 800 /* endpoint 0: just flush */ 801 musb_h_ep0_flush_fifo(hw_ep); 802 } 803 804 /* target addr and (for multipoint) hub addr/port */ 805 if (musb->is_multipoint) { 806 musb_write_txfunaddr(mbase, epnum, qh->addr_reg); 807 musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg); 808 musb_write_txhubport(mbase, epnum, qh->h_port_reg); 809/* FIXME if !epnum, do the same for RX ... */ 810 } else 811 musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); 812 813 /* protocol/endpoint/interval/NAKlimit */ 814 if (epnum) { 815 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); 816 if (musb->double_buffer_not_ok) { 817 musb_writew(epio, MUSB_TXMAXP, 818 hw_ep->max_packet_sz_tx); 819 } else if (can_bulk_split(musb, qh->type)) { 820 qh->hb_mult = hw_ep->max_packet_sz_tx 821 / packet_sz; 822 musb_writew(epio, MUSB_TXMAXP, packet_sz 823 | ((qh->hb_mult) - 1) << 11); 824 } else { 825 musb_writew(epio, MUSB_TXMAXP, 826 qh->maxpacket | 827 ((qh->hb_mult - 1) << 11)); 828 } 829 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); 830 } else { 831 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); 832 if (musb->is_multipoint) 833 musb_writeb(epio, MUSB_TYPE0, 834 qh->type_reg); 835 } 836 837 if (can_bulk_split(musb, qh->type)) 838 load_count = min((u32) hw_ep->max_packet_sz_tx, 839 len); 840 else 841 load_count = min((u32) packet_sz, len); 842 843 if (dma_channel && musb_tx_dma_program(dma_controller, 844 hw_ep, qh, urb, offset, len)) 845 load_count = 0; 846 847 if (load_count) { 848 /* PIO to load FIFO */ 849 qh->segsize = load_count; 850 if (!buf) { 851 sg_miter_start(&qh->sg_miter, urb->sg, 1, 852 SG_MITER_ATOMIC 853 | SG_MITER_FROM_SG); 854 if (!sg_miter_next(&qh->sg_miter)) { 855 dev_err(musb->controller, 856 "error: sg" 857 "list empty\n"); 858 sg_miter_stop(&qh->sg_miter); 859 goto finish; 860 } 861 buf = qh->sg_miter.addr + urb->sg->offset + 862 urb->actual_length; 863 load_count = min_t(u32, load_count, 864 qh->sg_miter.length); 865 musb_write_fifo(hw_ep, load_count, buf); 866 qh->sg_miter.consumed = load_count; 867 sg_miter_stop(&qh->sg_miter); 868 } else 869 musb_write_fifo(hw_ep, load_count, buf); 870 } 871finish: 872 /* re-enable interrupt */ 873 musb_writew(mbase, MUSB_INTRTXE, int_txe); 874 875 /* IN/receive */ 876 } else { 877 u16 csr; 878 879 if (hw_ep->rx_reinit) { 880 musb_rx_reinit(musb, qh, hw_ep); 881 882 /* init new state: toggle and NYET, maybe DMA later */ 883 if (usb_gettoggle(urb->dev, qh->epnum, 0)) 884 csr = MUSB_RXCSR_H_WR_DATATOGGLE 885 | MUSB_RXCSR_H_DATATOGGLE; 886 else 887 csr = 0; 888 if (qh->type == USB_ENDPOINT_XFER_INT) 889 csr |= MUSB_RXCSR_DISNYET; 890 891 } else { 892 csr = musb_readw(hw_ep->regs, MUSB_RXCSR); 893 894 if (csr & (MUSB_RXCSR_RXPKTRDY 895 | MUSB_RXCSR_DMAENAB 896 | MUSB_RXCSR_H_REQPKT)) 897 ERR("broken !rx_reinit, ep%d csr %04x\n", 898 hw_ep->epnum, csr); 899 900 /* scrub any stale state, leaving toggle alone */ 901 csr &= MUSB_RXCSR_DISNYET; 902 } 903 904 /* kick things off */ 905 906 if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { 907 /* Candidate for DMA */ 908 dma_channel->actual_len = 0L; 909 qh->segsize = len; 910 911 /* AUTOREQ is in a DMA register */ 912 musb_writew(hw_ep->regs, MUSB_RXCSR, csr); 913 csr = musb_readw(hw_ep->regs, MUSB_RXCSR); 914 915 /* 916 * Unless caller treats short RX transfers as 917 * errors, we dare not queue multiple transfers. 918 */ 919 dma_ok = dma_controller->channel_program(dma_channel, 920 packet_sz, !(urb->transfer_flags & 921 URB_SHORT_NOT_OK), 922 urb->transfer_dma + offset, 923 qh->segsize); 924 if (!dma_ok) { 925 dma_controller->channel_release(dma_channel); 926 hw_ep->rx_channel = dma_channel = NULL; 927 } else 928 csr |= MUSB_RXCSR_DMAENAB; 929 } 930 931 csr |= MUSB_RXCSR_H_REQPKT; 932 dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr); 933 musb_writew(hw_ep->regs, MUSB_RXCSR, csr); 934 csr = musb_readw(hw_ep->regs, MUSB_RXCSR); 935 } 936} 937 938/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to 939 * the end; avoids starvation for other endpoints. 940 */ 941static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, 942 int is_in) 943{ 944 struct dma_channel *dma; 945 struct urb *urb; 946 void __iomem *mbase = musb->mregs; 947 void __iomem *epio = ep->regs; 948 struct musb_qh *cur_qh, *next_qh; 949 u16 rx_csr, tx_csr; 950 951 musb_ep_select(mbase, ep->epnum); 952 if (is_in) { 953 dma = is_dma_capable() ? ep->rx_channel : NULL; 954 955 /* clear nak timeout bit */ 956 rx_csr = musb_readw(epio, MUSB_RXCSR); 957 rx_csr |= MUSB_RXCSR_H_WZC_BITS; 958 rx_csr &= ~MUSB_RXCSR_DATAERROR; 959 musb_writew(epio, MUSB_RXCSR, rx_csr); 960 961 cur_qh = first_qh(&musb->in_bulk); 962 } else { 963 dma = is_dma_capable() ? ep->tx_channel : NULL; 964 965 /* clear nak timeout bit */ 966 tx_csr = musb_readw(epio, MUSB_TXCSR); 967 tx_csr |= MUSB_TXCSR_H_WZC_BITS; 968 tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT; 969 musb_writew(epio, MUSB_TXCSR, tx_csr); 970 971 cur_qh = first_qh(&musb->out_bulk); 972 } 973 if (cur_qh) { 974 urb = next_urb(cur_qh); 975 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 976 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 977 musb->dma_controller->channel_abort(dma); 978 urb->actual_length += dma->actual_len; 979 dma->actual_len = 0L; 980 } 981 musb_save_toggle(cur_qh, is_in, urb); 982 983 if (is_in) { 984 /* move cur_qh to end of queue */ 985 list_move_tail(&cur_qh->ring, &musb->in_bulk); 986 987 /* get the next qh from musb->in_bulk */ 988 next_qh = first_qh(&musb->in_bulk); 989 990 /* set rx_reinit and schedule the next qh */ 991 ep->rx_reinit = 1; 992 } else { 993 /* move cur_qh to end of queue */ 994 list_move_tail(&cur_qh->ring, &musb->out_bulk); 995 996 /* get the next qh from musb->out_bulk */ 997 next_qh = first_qh(&musb->out_bulk); 998 999 /* set tx_reinit and schedule the next qh */ 1000 ep->tx_reinit = 1; 1001 } 1002 musb_start_urb(musb, is_in, next_qh); 1003 } 1004} 1005 1006/* 1007 * Service the default endpoint (ep0) as host. 1008 * Return true until it's time to start the status stage. 1009 */ 1010static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) 1011{ 1012 bool more = false; 1013 u8 *fifo_dest = NULL; 1014 u16 fifo_count = 0; 1015 struct musb_hw_ep *hw_ep = musb->control_ep; 1016 struct musb_qh *qh = hw_ep->in_qh; 1017 struct usb_ctrlrequest *request; 1018 1019 switch (musb->ep0_stage) { 1020 case MUSB_EP0_IN: 1021 fifo_dest = urb->transfer_buffer + urb->actual_length; 1022 fifo_count = min_t(size_t, len, urb->transfer_buffer_length - 1023 urb->actual_length); 1024 if (fifo_count < len) 1025 urb->status = -EOVERFLOW; 1026 1027 musb_read_fifo(hw_ep, fifo_count, fifo_dest); 1028 1029 urb->actual_length += fifo_count; 1030 if (len < qh->maxpacket) { 1031 /* always terminate on short read; it's 1032 * rarely reported as an error. 1033 */ 1034 } else if (urb->actual_length < 1035 urb->transfer_buffer_length) 1036 more = true; 1037 break; 1038 case MUSB_EP0_START: 1039 request = (struct usb_ctrlrequest *) urb->setup_packet; 1040 1041 if (!request->wLength) { 1042 dev_dbg(musb->controller, "start no-DATA\n"); 1043 break; 1044 } else if (request->bRequestType & USB_DIR_IN) { 1045 dev_dbg(musb->controller, "start IN-DATA\n"); 1046 musb->ep0_stage = MUSB_EP0_IN; 1047 more = true; 1048 break; 1049 } else { 1050 dev_dbg(musb->controller, "start OUT-DATA\n"); 1051 musb->ep0_stage = MUSB_EP0_OUT; 1052 more = true; 1053 } 1054 /* FALLTHROUGH */ 1055 case MUSB_EP0_OUT: 1056 fifo_count = min_t(size_t, qh->maxpacket, 1057 urb->transfer_buffer_length - 1058 urb->actual_length); 1059 if (fifo_count) { 1060 fifo_dest = (u8 *) (urb->transfer_buffer 1061 + urb->actual_length); 1062 dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n", 1063 fifo_count, 1064 (fifo_count == 1) ? "" : "s", 1065 fifo_dest); 1066 musb_write_fifo(hw_ep, fifo_count, fifo_dest); 1067 1068 urb->actual_length += fifo_count; 1069 more = true; 1070 } 1071 break; 1072 default: 1073 ERR("bogus ep0 stage %d\n", musb->ep0_stage); 1074 break; 1075 } 1076 1077 return more; 1078} 1079 1080/* 1081 * Handle default endpoint interrupt as host. Only called in IRQ time 1082 * from musb_interrupt(). 1083 * 1084 * called with controller irqlocked 1085 */ 1086irqreturn_t musb_h_ep0_irq(struct musb *musb) 1087{ 1088 struct urb *urb; 1089 u16 csr, len; 1090 int status = 0; 1091 void __iomem *mbase = musb->mregs; 1092 struct musb_hw_ep *hw_ep = musb->control_ep; 1093 void __iomem *epio = hw_ep->regs; 1094 struct musb_qh *qh = hw_ep->in_qh; 1095 bool complete = false; 1096 irqreturn_t retval = IRQ_NONE; 1097 1098 /* ep0 only has one queue, "in" */ 1099 urb = next_urb(qh); 1100 1101 musb_ep_select(mbase, 0); 1102 csr = musb_readw(epio, MUSB_CSR0); 1103 len = (csr & MUSB_CSR0_RXPKTRDY) 1104 ? musb_readb(epio, MUSB_COUNT0) 1105 : 0; 1106 1107 dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", 1108 csr, qh, len, urb, musb->ep0_stage); 1109 1110 /* if we just did status stage, we are done */ 1111 if (MUSB_EP0_STATUS == musb->ep0_stage) { 1112 retval = IRQ_HANDLED; 1113 complete = true; 1114 } 1115 1116 /* prepare status */ 1117 if (csr & MUSB_CSR0_H_RXSTALL) { 1118 dev_dbg(musb->controller, "STALLING ENDPOINT\n"); 1119 status = -EPIPE; 1120 1121 } else if (csr & MUSB_CSR0_H_ERROR) { 1122 dev_dbg(musb->controller, "no response, csr0 %04x\n", csr); 1123 status = -EPROTO; 1124 1125 } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { 1126 dev_dbg(musb->controller, "control NAK timeout\n"); 1127 1128 /* NOTE: this code path would be a good place to PAUSE a 1129 * control transfer, if another one is queued, so that 1130 * ep0 is more likely to stay busy. That's already done 1131 * for bulk RX transfers. 1132 * 1133 * if (qh->ring.next != &musb->control), then 1134 * we have a candidate... NAKing is *NOT* an error 1135 */ 1136 musb_writew(epio, MUSB_CSR0, 0); 1137 retval = IRQ_HANDLED; 1138 } 1139 1140 if (status) { 1141 dev_dbg(musb->controller, "aborting\n"); 1142 retval = IRQ_HANDLED; 1143 if (urb) 1144 urb->status = status; 1145 complete = true; 1146 1147 /* use the proper sequence to abort the transfer */ 1148 if (csr & MUSB_CSR0_H_REQPKT) { 1149 csr &= ~MUSB_CSR0_H_REQPKT; 1150 musb_writew(epio, MUSB_CSR0, csr); 1151 csr &= ~MUSB_CSR0_H_NAKTIMEOUT; 1152 musb_writew(epio, MUSB_CSR0, csr); 1153 } else { 1154 musb_h_ep0_flush_fifo(hw_ep); 1155 } 1156 1157 musb_writeb(epio, MUSB_NAKLIMIT0, 0); 1158 1159 /* clear it */ 1160 musb_writew(epio, MUSB_CSR0, 0); 1161 } 1162 1163 if (unlikely(!urb)) { 1164 /* stop endpoint since we have no place for its data, this 1165 * SHOULD NEVER HAPPEN! */ 1166 ERR("no URB for end 0\n"); 1167 1168 musb_h_ep0_flush_fifo(hw_ep); 1169 goto done; 1170 } 1171 1172 if (!complete) { 1173 /* call common logic and prepare response */ 1174 if (musb_h_ep0_continue(musb, len, urb)) { 1175 /* more packets required */ 1176 csr = (MUSB_EP0_IN == musb->ep0_stage) 1177 ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; 1178 } else { 1179 /* data transfer complete; perform status phase */ 1180 if (usb_pipeout(urb->pipe) 1181 || !urb->transfer_buffer_length) 1182 csr = MUSB_CSR0_H_STATUSPKT 1183 | MUSB_CSR0_H_REQPKT; 1184 else 1185 csr = MUSB_CSR0_H_STATUSPKT 1186 | MUSB_CSR0_TXPKTRDY; 1187 1188 /* flag status stage */ 1189 musb->ep0_stage = MUSB_EP0_STATUS; 1190 1191 dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr); 1192 1193 } 1194 musb_writew(epio, MUSB_CSR0, csr); 1195 retval = IRQ_HANDLED; 1196 } else 1197 musb->ep0_stage = MUSB_EP0_IDLE; 1198 1199 /* call completion handler if done */ 1200 if (complete) 1201 musb_advance_schedule(musb, urb, hw_ep, 1); 1202done: 1203 return retval; 1204} 1205 1206 1207#ifdef CONFIG_USB_INVENTRA_DMA 1208 1209/* Host side TX (OUT) using Mentor DMA works as follows: 1210 submit_urb -> 1211 - if queue was empty, Program Endpoint 1212 - ... which starts DMA to fifo in mode 1 or 0 1213 1214 DMA Isr (transfer complete) -> TxAvail() 1215 - Stop DMA (~DmaEnab) (<--- Alert ... currently happens 1216 only in musb_cleanup_urb) 1217 - TxPktRdy has to be set in mode 0 or for 1218 short packets in mode 1. 1219*/ 1220 1221#endif 1222 1223/* Service a Tx-Available or dma completion irq for the endpoint */ 1224void musb_host_tx(struct musb *musb, u8 epnum) 1225{ 1226 int pipe; 1227 bool done = false; 1228 u16 tx_csr; 1229 size_t length = 0; 1230 size_t offset = 0; 1231 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1232 void __iomem *epio = hw_ep->regs; 1233 struct musb_qh *qh = hw_ep->out_qh; 1234 struct urb *urb = next_urb(qh); 1235 u32 status = 0; 1236 void __iomem *mbase = musb->mregs; 1237 struct dma_channel *dma; 1238 bool transfer_pending = false; 1239 static bool use_sg; 1240 1241 musb_ep_select(mbase, epnum); 1242 tx_csr = musb_readw(epio, MUSB_TXCSR); 1243 1244 /* with CPPI, DMA sometimes triggers "extra" irqs */ 1245 if (!urb) { 1246 dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); 1247 return; 1248 } 1249 1250 pipe = urb->pipe; 1251 dma = is_dma_capable() ? hw_ep->tx_channel : NULL; 1252 dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, 1253 dma ? ", dma" : ""); 1254 1255 /* check for errors */ 1256 if (tx_csr & MUSB_TXCSR_H_RXSTALL) { 1257 /* dma was disabled, fifo flushed */ 1258 dev_dbg(musb->controller, "TX end %d stall\n", epnum); 1259 1260 /* stall; record URB status */ 1261 status = -EPIPE; 1262 1263 } else if (tx_csr & MUSB_TXCSR_H_ERROR) { 1264 /* (NON-ISO) dma was disabled, fifo flushed */ 1265 dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum); 1266 1267 status = -ETIMEDOUT; 1268 1269 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { 1270 if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1 1271 && !list_is_singular(&musb->out_bulk)) { 1272 dev_dbg(musb->controller, 1273 "NAK timeout on TX%d ep\n", epnum); 1274 musb_bulk_nak_timeout(musb, hw_ep, 0); 1275 } else { 1276 dev_dbg(musb->controller, 1277 "TX end=%d device not responding\n", epnum); 1278 /* NOTE: this code path would be a good place to PAUSE a 1279 * transfer, if there's some other (nonperiodic) tx urb 1280 * that could use this fifo. (dma complicates it...) 1281 * That's already done for bulk RX transfers. 1282 * 1283 * if (bulk && qh->ring.next != &musb->out_bulk), then 1284 * we have a candidate... NAKing is *NOT* an error 1285 */ 1286 musb_ep_select(mbase, epnum); 1287 musb_writew(epio, MUSB_TXCSR, 1288 MUSB_TXCSR_H_WZC_BITS 1289 | MUSB_TXCSR_TXPKTRDY); 1290 } 1291 return; 1292 } 1293 1294done: 1295 if (status) { 1296 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1297 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1298 (void) musb->dma_controller->channel_abort(dma); 1299 } 1300 1301 /* do the proper sequence to abort the transfer in the 1302 * usb core; the dma engine should already be stopped. 1303 */ 1304 musb_h_tx_flush_fifo(hw_ep); 1305 tx_csr &= ~(MUSB_TXCSR_AUTOSET 1306 | MUSB_TXCSR_DMAENAB 1307 | MUSB_TXCSR_H_ERROR 1308 | MUSB_TXCSR_H_RXSTALL 1309 | MUSB_TXCSR_H_NAKTIMEOUT 1310 ); 1311 1312 musb_ep_select(mbase, epnum); 1313 musb_writew(epio, MUSB_TXCSR, tx_csr); 1314 /* REVISIT may need to clear FLUSHFIFO ... */ 1315 musb_writew(epio, MUSB_TXCSR, tx_csr); 1316 musb_writeb(epio, MUSB_TXINTERVAL, 0); 1317 1318 done = true; 1319 } 1320 1321 /* second cppi case */ 1322 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1323 dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); 1324 return; 1325 } 1326 1327 if (is_dma_capable() && dma && !status) { 1328 /* 1329 * DMA has completed. But if we're using DMA mode 1 (multi 1330 * packet DMA), we need a terminal TXPKTRDY interrupt before 1331 * we can consider this transfer completed, lest we trash 1332 * its last packet when writing the next URB's data. So we 1333 * switch back to mode 0 to get that interrupt; we'll come 1334 * back here once it happens. 1335 */ 1336 if (tx_csr & MUSB_TXCSR_DMAMODE) { 1337 /* 1338 * We shouldn't clear DMAMODE with DMAENAB set; so 1339 * clear them in a safe order. That should be OK 1340 * once TXPKTRDY has been set (and I've never seen 1341 * it being 0 at this moment -- DMA interrupt latency 1342 * is significant) but if it hasn't been then we have 1343 * no choice but to stop being polite and ignore the 1344 * programmer's guide... :-) 1345 * 1346 * Note that we must write TXCSR with TXPKTRDY cleared 1347 * in order not to re-trigger the packet send (this bit 1348 * can't be cleared by CPU), and there's another caveat: 1349 * TXPKTRDY may be set shortly and then cleared in the 1350 * double-buffered FIFO mode, so we do an extra TXCSR 1351 * read for debouncing... 1352 */ 1353 tx_csr &= musb_readw(epio, MUSB_TXCSR); 1354 if (tx_csr & MUSB_TXCSR_TXPKTRDY) { 1355 tx_csr &= ~(MUSB_TXCSR_DMAENAB | 1356 MUSB_TXCSR_TXPKTRDY); 1357 musb_writew(epio, MUSB_TXCSR, 1358 tx_csr | MUSB_TXCSR_H_WZC_BITS); 1359 } 1360 tx_csr &= ~(MUSB_TXCSR_DMAMODE | 1361 MUSB_TXCSR_TXPKTRDY); 1362 musb_writew(epio, MUSB_TXCSR, 1363 tx_csr | MUSB_TXCSR_H_WZC_BITS); 1364 1365 /* 1366 * There is no guarantee that we'll get an interrupt 1367 * after clearing DMAMODE as we might have done this 1368 * too late (after TXPKTRDY was cleared by controller). 1369 * Re-read TXCSR as we have spoiled its previous value. 1370 */ 1371 tx_csr = musb_readw(epio, MUSB_TXCSR); 1372 } 1373 1374 /* 1375 * We may get here from a DMA completion or TXPKTRDY interrupt. 1376 * In any case, we must check the FIFO status here and bail out 1377 * only if the FIFO still has data -- that should prevent the 1378 * "missed" TXPKTRDY interrupts and deal with double-buffered 1379 * FIFO mode too... 1380 */ 1381 if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) { 1382 dev_dbg(musb->controller, "DMA complete but packet still in FIFO, " 1383 "CSR %04x\n", tx_csr); 1384 return; 1385 } 1386 } 1387 1388 if (!status || dma || usb_pipeisoc(pipe)) { 1389 if (dma) 1390 length = dma->actual_len; 1391 else 1392 length = qh->segsize; 1393 qh->offset += length; 1394 1395 if (usb_pipeisoc(pipe)) { 1396 struct usb_iso_packet_descriptor *d; 1397 1398 d = urb->iso_frame_desc + qh->iso_idx; 1399 d->actual_length = length; 1400 d->status = status; 1401 if (++qh->iso_idx >= urb->number_of_packets) { 1402 done = true; 1403 } else { 1404 d++; 1405 offset = d->offset; 1406 length = d->length; 1407 } 1408 } else if (dma && urb->transfer_buffer_length == qh->offset) { 1409 done = true; 1410 } else { 1411 /* see if we need to send more data, or ZLP */ 1412 if (qh->segsize < qh->maxpacket) 1413 done = true; 1414 else if (qh->offset == urb->transfer_buffer_length 1415 && !(urb->transfer_flags 1416 & URB_ZERO_PACKET)) 1417 done = true; 1418 if (!done) { 1419 offset = qh->offset; 1420 length = urb->transfer_buffer_length - offset; 1421 transfer_pending = true; 1422 } 1423 } 1424 } 1425 1426 /* urb->status != -EINPROGRESS means request has been faulted, 1427 * so we must abort this transfer after cleanup 1428 */ 1429 if (urb->status != -EINPROGRESS) { 1430 done = true; 1431 if (status == 0) 1432 status = urb->status; 1433 } 1434 1435 if (done) { 1436 /* set status */ 1437 urb->status = status; 1438 urb->actual_length = qh->offset; 1439 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); 1440 return; 1441 } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) { 1442 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, 1443 offset, length)) { 1444 if (is_cppi_enabled() || tusb_dma_omap()) 1445 musb_h_tx_dma_start(hw_ep); 1446 return; 1447 } 1448 } else if (tx_csr & MUSB_TXCSR_DMAENAB) { 1449 dev_dbg(musb->controller, "not complete, but DMA enabled?\n"); 1450 return; 1451 } 1452 1453 /* 1454 * PIO: start next packet in this URB. 1455 * 1456 * REVISIT: some docs say that when hw_ep->tx_double_buffered, 1457 * (and presumably, FIFO is not half-full) we should write *two* 1458 * packets before updating TXCSR; other docs disagree... 1459 */ 1460 if (length > qh->maxpacket) 1461 length = qh->maxpacket; 1462 /* Unmap the buffer so that CPU can use it */ 1463 usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); 1464 1465 /* 1466 * We need to map sg if the transfer_buffer is 1467 * NULL. 1468 */ 1469 if (!urb->transfer_buffer) 1470 use_sg = true; 1471 1472 if (use_sg) { 1473 /* sg_miter_start is already done in musb_ep_program */ 1474 if (!sg_miter_next(&qh->sg_miter)) { 1475 dev_err(musb->controller, "error: sg list empty\n"); 1476 sg_miter_stop(&qh->sg_miter); 1477 status = -EINVAL; 1478 goto done; 1479 } 1480 urb->transfer_buffer = qh->sg_miter.addr; 1481 length = min_t(u32, length, qh->sg_miter.length); 1482 musb_write_fifo(hw_ep, length, urb->transfer_buffer); 1483 qh->sg_miter.consumed = length; 1484 sg_miter_stop(&qh->sg_miter); 1485 } else { 1486 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); 1487 } 1488 1489 qh->segsize = length; 1490 1491 if (use_sg) { 1492 if (offset + length >= urb->transfer_buffer_length) 1493 use_sg = false; 1494 } 1495 1496 musb_ep_select(mbase, epnum); 1497 musb_writew(epio, MUSB_TXCSR, 1498 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); 1499} 1500 1501 1502#ifdef CONFIG_USB_INVENTRA_DMA 1503 1504/* Host side RX (IN) using Mentor DMA works as follows: 1505 submit_urb -> 1506 - if queue was empty, ProgramEndpoint 1507 - first IN token is sent out (by setting ReqPkt) 1508 LinuxIsr -> RxReady() 1509 /\ => first packet is received 1510 | - Set in mode 0 (DmaEnab, ~ReqPkt) 1511 | -> DMA Isr (transfer complete) -> RxReady() 1512 | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) 1513 | - if urb not complete, send next IN token (ReqPkt) 1514 | | else complete urb. 1515 | | 1516 --------------------------- 1517 * 1518 * Nuances of mode 1: 1519 * For short packets, no ack (+RxPktRdy) is sent automatically 1520 * (even if AutoClear is ON) 1521 * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent 1522 * automatically => major problem, as collecting the next packet becomes 1523 * difficult. Hence mode 1 is not used. 1524 * 1525 * REVISIT 1526 * All we care about at this driver level is that 1527 * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; 1528 * (b) termination conditions are: short RX, or buffer full; 1529 * (c) fault modes include 1530 * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. 1531 * (and that endpoint's dma queue stops immediately) 1532 * - overflow (full, PLUS more bytes in the terminal packet) 1533 * 1534 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would 1535 * thus be a great candidate for using mode 1 ... for all but the 1536 * last packet of one URB's transfer. 1537 */ 1538 1539#endif 1540 1541/* 1542 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, 1543 * and high-bandwidth IN transfer cases. 1544 */ 1545void musb_host_rx(struct musb *musb, u8 epnum) 1546{ 1547 struct urb *urb; 1548 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1549 void __iomem *epio = hw_ep->regs; 1550 struct musb_qh *qh = hw_ep->in_qh; 1551 size_t xfer_len; 1552 void __iomem *mbase = musb->mregs; 1553 int pipe; 1554 u16 rx_csr, val; 1555 bool iso_err = false; 1556 bool done = false; 1557 u32 status; 1558 struct dma_channel *dma; 1559 static bool use_sg; 1560 unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; 1561 1562 musb_ep_select(mbase, epnum); 1563 1564 urb = next_urb(qh); 1565 dma = is_dma_capable() ? hw_ep->rx_channel : NULL; 1566 status = 0; 1567 xfer_len = 0; 1568 1569 rx_csr = musb_readw(epio, MUSB_RXCSR); 1570 val = rx_csr; 1571 1572 if (unlikely(!urb)) { 1573 /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least 1574 * usbtest #11 (unlinks) triggers it regularly, sometimes 1575 * with fifo full. (Only with DMA??) 1576 */ 1577 dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, 1578 musb_readw(epio, MUSB_RXCOUNT)); 1579 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); 1580 return; 1581 } 1582 1583 pipe = urb->pipe; 1584 1585 dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", 1586 epnum, rx_csr, urb->actual_length, 1587 dma ? dma->actual_len : 0); 1588 1589 /* check for errors, concurrent stall & unlink is not really 1590 * handled yet! */ 1591 if (rx_csr & MUSB_RXCSR_H_RXSTALL) { 1592 dev_dbg(musb->controller, "RX end %d STALL\n", epnum); 1593 1594 /* stall; record URB status */ 1595 status = -EPIPE; 1596 1597 } else if (rx_csr & MUSB_RXCSR_H_ERROR) { 1598 dev_dbg(musb->controller, "end %d RX proto error\n", epnum); 1599 1600 status = -EPROTO; 1601 musb_writeb(epio, MUSB_RXINTERVAL, 0); 1602 1603 } else if (rx_csr & MUSB_RXCSR_DATAERROR) { 1604 1605 if (USB_ENDPOINT_XFER_ISOC != qh->type) { 1606 dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum); 1607 1608 /* NOTE: NAKing is *NOT* an error, so we want to 1609 * continue. Except ... if there's a request for 1610 * another QH, use that instead of starving it. 1611 * 1612 * Devices like Ethernet and serial adapters keep 1613 * reads posted at all times, which will starve 1614 * other devices without this logic. 1615 */ 1616 if (usb_pipebulk(urb->pipe) 1617 && qh->mux == 1 1618 && !list_is_singular(&musb->in_bulk)) { 1619 musb_bulk_nak_timeout(musb, hw_ep, 1); 1620 return; 1621 } 1622 musb_ep_select(mbase, epnum); 1623 rx_csr |= MUSB_RXCSR_H_WZC_BITS; 1624 rx_csr &= ~MUSB_RXCSR_DATAERROR; 1625 musb_writew(epio, MUSB_RXCSR, rx_csr); 1626 1627 goto finish; 1628 } else { 1629 dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum); 1630 /* packet error reported later */ 1631 iso_err = true; 1632 } 1633 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { 1634 dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n", 1635 epnum); 1636 status = -EPROTO; 1637 } 1638 1639 /* faults abort the transfer */ 1640 if (status) { 1641 /* clean up dma and collect transfer count */ 1642 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1643 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1644 (void) musb->dma_controller->channel_abort(dma); 1645 xfer_len = dma->actual_len; 1646 } 1647 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); 1648 musb_writeb(epio, MUSB_RXINTERVAL, 0); 1649 done = true; 1650 goto finish; 1651 } 1652 1653 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { 1654 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ 1655 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); 1656 goto finish; 1657 } 1658 1659 /* thorough shutdown for now ... given more precise fault handling 1660 * and better queueing support, we might keep a DMA pipeline going 1661 * while processing this irq for earlier completions. 1662 */ 1663 1664 /* FIXME this is _way_ too much in-line logic for Mentor DMA */ 1665 1666#ifndef CONFIG_USB_INVENTRA_DMA 1667 if (rx_csr & MUSB_RXCSR_H_REQPKT) { 1668 /* REVISIT this happened for a while on some short reads... 1669 * the cleanup still needs investigation... looks bad... 1670 * and also duplicates dma cleanup code above ... plus, 1671 * shouldn't this be the "half full" double buffer case? 1672 */ 1673 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1674 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1675 (void) musb->dma_controller->channel_abort(dma); 1676 xfer_len = dma->actual_len; 1677 done = true; 1678 } 1679 1680 dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, 1681 xfer_len, dma ? ", dma" : ""); 1682 rx_csr &= ~MUSB_RXCSR_H_REQPKT; 1683 1684 musb_ep_select(mbase, epnum); 1685 musb_writew(epio, MUSB_RXCSR, 1686 MUSB_RXCSR_H_WZC_BITS | rx_csr); 1687 } 1688#endif 1689 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { 1690 xfer_len = dma->actual_len; 1691 1692 val &= ~(MUSB_RXCSR_DMAENAB 1693 | MUSB_RXCSR_H_AUTOREQ 1694 | MUSB_RXCSR_AUTOCLEAR 1695 | MUSB_RXCSR_RXPKTRDY); 1696 musb_writew(hw_ep->regs, MUSB_RXCSR, val); 1697 1698#ifdef CONFIG_USB_INVENTRA_DMA 1699 if (usb_pipeisoc(pipe)) { 1700 struct usb_iso_packet_descriptor *d; 1701 1702 d = urb->iso_frame_desc + qh->iso_idx; 1703 d->actual_length = xfer_len; 1704 1705 /* even if there was an error, we did the dma 1706 * for iso_frame_desc->length 1707 */ 1708 if (d->status != -EILSEQ && d->status != -EOVERFLOW) 1709 d->status = 0; 1710 1711 if (++qh->iso_idx >= urb->number_of_packets) 1712 done = true; 1713 else 1714 done = false; 1715 1716 } else { 1717 /* done if urb buffer is full or short packet is recd */ 1718 done = (urb->actual_length + xfer_len >= 1719 urb->transfer_buffer_length 1720 || dma->actual_len < qh->maxpacket); 1721 } 1722 1723 /* send IN token for next packet, without AUTOREQ */ 1724 if (!done) { 1725 val |= MUSB_RXCSR_H_REQPKT; 1726 musb_writew(epio, MUSB_RXCSR, 1727 MUSB_RXCSR_H_WZC_BITS | val); 1728 } 1729 1730 dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, 1731 done ? "off" : "reset", 1732 musb_readw(epio, MUSB_RXCSR), 1733 musb_readw(epio, MUSB_RXCOUNT)); 1734#else 1735 done = true; 1736#endif 1737 } else if (urb->status == -EINPROGRESS) { 1738 /* if no errors, be sure a packet is ready for unloading */ 1739 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { 1740 status = -EPROTO; 1741 ERR("Rx interrupt with no errors or packet!\n"); 1742 1743 /* FIXME this is another "SHOULD NEVER HAPPEN" */ 1744 1745/* SCRUB (RX) */ 1746 /* do the proper sequence to abort the transfer */ 1747 musb_ep_select(mbase, epnum); 1748 val &= ~MUSB_RXCSR_H_REQPKT; 1749 musb_writew(epio, MUSB_RXCSR, val); 1750 goto finish; 1751 } 1752 1753 /* we are expecting IN packets */ 1754#ifdef CONFIG_USB_INVENTRA_DMA 1755 if (dma) { 1756 struct dma_controller *c; 1757 u16 rx_count; 1758 int ret, length; 1759 dma_addr_t buf; 1760 1761 rx_count = musb_readw(epio, MUSB_RXCOUNT); 1762 1763 dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n", 1764 epnum, rx_count, 1765 urb->transfer_dma 1766 + urb->actual_length, 1767 qh->offset, 1768 urb->transfer_buffer_length); 1769 1770 c = musb->dma_controller; 1771 1772 if (usb_pipeisoc(pipe)) { 1773 int d_status = 0; 1774 struct usb_iso_packet_descriptor *d; 1775 1776 d = urb->iso_frame_desc + qh->iso_idx; 1777 1778 if (iso_err) { 1779 d_status = -EILSEQ; 1780 urb->error_count++; 1781 } 1782 if (rx_count > d->length) { 1783 if (d_status == 0) { 1784 d_status = -EOVERFLOW; 1785 urb->error_count++; 1786 } 1787 dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\ 1788 rx_count, d->length); 1789 1790 length = d->length; 1791 } else 1792 length = rx_count; 1793 d->status = d_status; 1794 buf = urb->transfer_dma + d->offset; 1795 } else { 1796 length = rx_count; 1797 buf = urb->transfer_dma + 1798 urb->actual_length; 1799 } 1800 1801 dma->desired_mode = 0; 1802#ifdef USE_MODE1 1803 /* because of the issue below, mode 1 will 1804 * only rarely behave with correct semantics. 1805 */ 1806 if ((urb->transfer_flags & 1807 URB_SHORT_NOT_OK) 1808 && (urb->transfer_buffer_length - 1809 urb->actual_length) 1810 > qh->maxpacket) 1811 dma->desired_mode = 1; 1812 if (rx_count < hw_ep->max_packet_sz_rx) { 1813 length = rx_count; 1814 dma->desired_mode = 0; 1815 } else { 1816 length = urb->transfer_buffer_length; 1817 } 1818#endif 1819 1820/* Disadvantage of using mode 1: 1821 * It's basically usable only for mass storage class; essentially all 1822 * other protocols also terminate transfers on short packets. 1823 * 1824 * Details: 1825 * An extra IN token is sent at the end of the transfer (due to AUTOREQ) 1826 * If you try to use mode 1 for (transfer_buffer_length - 512), and try 1827 * to use the extra IN token to grab the last packet using mode 0, then 1828 * the problem is that you cannot be sure when the device will send the 1829 * last packet and RxPktRdy set. Sometimes the packet is recd too soon 1830 * such that it gets lost when RxCSR is re-set at the end of the mode 1 1831 * transfer, while sometimes it is recd just a little late so that if you 1832 * try to configure for mode 0 soon after the mode 1 transfer is 1833 * completed, you will find rxcount 0. Okay, so you might think why not 1834 * wait for an interrupt when the pkt is recd. Well, you won't get any! 1835 */ 1836 1837 val = musb_readw(epio, MUSB_RXCSR); 1838 val &= ~MUSB_RXCSR_H_REQPKT; 1839 1840 if (dma->desired_mode == 0) 1841 val &= ~MUSB_RXCSR_H_AUTOREQ; 1842 else 1843 val |= MUSB_RXCSR_H_AUTOREQ; 1844 val |= MUSB_RXCSR_DMAENAB; 1845 1846 /* autoclear shouldn't be set in high bandwidth */ 1847 if (qh->hb_mult == 1) 1848 val |= MUSB_RXCSR_AUTOCLEAR; 1849 1850 musb_writew(epio, MUSB_RXCSR, 1851 MUSB_RXCSR_H_WZC_BITS | val); 1852 1853 /* REVISIT if when actual_length != 0, 1854 * transfer_buffer_length needs to be 1855 * adjusted first... 1856 */ 1857 ret = c->channel_program( 1858 dma, qh->maxpacket, 1859 dma->desired_mode, buf, length); 1860 1861 if (!ret) { 1862 c->channel_release(dma); 1863 hw_ep->rx_channel = NULL; 1864 dma = NULL; 1865 val = musb_readw(epio, MUSB_RXCSR); 1866 val &= ~(MUSB_RXCSR_DMAENAB 1867 | MUSB_RXCSR_H_AUTOREQ 1868 | MUSB_RXCSR_AUTOCLEAR); 1869 musb_writew(epio, MUSB_RXCSR, val); 1870 } 1871 } 1872#endif /* Mentor DMA */ 1873 1874 if (!dma) { 1875 unsigned int received_len; 1876 1877 /* Unmap the buffer so that CPU can use it */ 1878 usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); 1879 1880 /* 1881 * We need to map sg if the transfer_buffer is 1882 * NULL. 1883 */ 1884 if (!urb->transfer_buffer) { 1885 use_sg = true; 1886 sg_miter_start(&qh->sg_miter, urb->sg, 1, 1887 sg_flags); 1888 } 1889 1890 if (use_sg) { 1891 if (!sg_miter_next(&qh->sg_miter)) { 1892 dev_err(musb->controller, "error: sg list empty\n"); 1893 sg_miter_stop(&qh->sg_miter); 1894 status = -EINVAL; 1895 done = true; 1896 goto finish; 1897 } 1898 urb->transfer_buffer = qh->sg_miter.addr; 1899 received_len = urb->actual_length; 1900 qh->offset = 0x0; 1901 done = musb_host_packet_rx(musb, urb, epnum, 1902 iso_err); 1903 /* Calculate the number of bytes received */ 1904 received_len = urb->actual_length - 1905 received_len; 1906 qh->sg_miter.consumed = received_len; 1907 sg_miter_stop(&qh->sg_miter); 1908 } else { 1909 done = musb_host_packet_rx(musb, urb, 1910 epnum, iso_err); 1911 } 1912 dev_dbg(musb->controller, "read %spacket\n", done ? "last " : ""); 1913 } 1914 } 1915 1916finish: 1917 urb->actual_length += xfer_len; 1918 qh->offset += xfer_len; 1919 if (done) { 1920 if (use_sg) 1921 use_sg = false; 1922 1923 if (urb->status == -EINPROGRESS) 1924 urb->status = status; 1925 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); 1926 } 1927} 1928 1929/* schedule nodes correspond to peripheral endpoints, like an OHCI QH. 1930 * the software schedule associates multiple such nodes with a given 1931 * host side hardware endpoint + direction; scheduling may activate 1932 * that hardware endpoint. 1933 */ 1934static int musb_schedule( 1935 struct musb *musb, 1936 struct musb_qh *qh, 1937 int is_in) 1938{ 1939 int idle; 1940 int best_diff; 1941 int best_end, epnum; 1942 struct musb_hw_ep *hw_ep = NULL; 1943 struct list_head *head = NULL; 1944 u8 toggle; 1945 u8 txtype; 1946 struct urb *urb = next_urb(qh); 1947 1948 /* use fixed hardware for control and bulk */ 1949 if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 1950 head = &musb->control; 1951 hw_ep = musb->control_ep; 1952 goto success; 1953 } 1954 1955 /* else, periodic transfers get muxed to other endpoints */ 1956 1957 /* 1958 * We know this qh hasn't been scheduled, so all we need to do 1959 * is choose which hardware endpoint to put it on ... 1960 * 1961 * REVISIT what we really want here is a regular schedule tree 1962 * like e.g. OHCI uses. 1963 */ 1964 best_diff = 4096; 1965 best_end = -1; 1966 1967 for (epnum = 1, hw_ep = musb->endpoints + 1; 1968 epnum < musb->nr_endpoints; 1969 epnum++, hw_ep++) { 1970 int diff; 1971 1972 if (musb_ep_get_qh(hw_ep, is_in) != NULL) 1973 continue; 1974 1975 if (hw_ep == musb->bulk_ep) 1976 continue; 1977 1978 if (is_in) 1979 diff = hw_ep->max_packet_sz_rx; 1980 else 1981 diff = hw_ep->max_packet_sz_tx; 1982 diff -= (qh->maxpacket * qh->hb_mult); 1983 1984 if (diff >= 0 && best_diff > diff) { 1985 1986 /* 1987 * Mentor controller has a bug in that if we schedule 1988 * a BULK Tx transfer on an endpoint that had earlier 1989 * handled ISOC then the BULK transfer has to start on 1990 * a zero toggle. If the BULK transfer starts on a 1 1991 * toggle then this transfer will fail as the mentor 1992 * controller starts the Bulk transfer on a 0 toggle 1993 * irrespective of the programming of the toggle bits 1994 * in the TXCSR register. Check for this condition 1995 * while allocating the EP for a Tx Bulk transfer. If 1996 * so skip this EP. 1997 */ 1998 hw_ep = musb->endpoints + epnum; 1999 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); 2000 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) 2001 >> 4) & 0x3; 2002 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && 2003 toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) 2004 continue; 2005 2006 best_diff = diff; 2007 best_end = epnum; 2008 } 2009 } 2010 /* use bulk reserved ep1 if no other ep is free */ 2011 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { 2012 hw_ep = musb->bulk_ep; 2013 if (is_in) 2014 head = &musb->in_bulk; 2015 else 2016 head = &musb->out_bulk; 2017 2018 /* Enable bulk RX/TX NAK timeout scheme when bulk requests are 2019 * multiplexed. This scheme doen't work in high speed to full 2020 * speed scenario as NAK interrupts are not coming from a 2021 * full speed device connected to a high speed device. 2022 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and 2023 * 4 (8 frame or 8ms) for FS device. 2024 */ 2025 if (qh->dev) 2026 qh->intv_reg = 2027 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; 2028 goto success; 2029 } else if (best_end < 0) { 2030 return -ENOSPC; 2031 } 2032 2033 idle = 1; 2034 qh->mux = 0; 2035 hw_ep = musb->endpoints + best_end; 2036 dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end); 2037success: 2038 if (head) { 2039 idle = list_empty(head); 2040 list_add_tail(&qh->ring, head); 2041 qh->mux = 1; 2042 } 2043 qh->hw_ep = hw_ep; 2044 qh->hep->hcpriv = qh; 2045 if (idle) 2046 musb_start_urb(musb, is_in, qh); 2047 return 0; 2048} 2049 2050static int musb_urb_enqueue( 2051 struct usb_hcd *hcd, 2052 struct urb *urb, 2053 gfp_t mem_flags) 2054{ 2055 unsigned long flags; 2056 struct musb *musb = hcd_to_musb(hcd); 2057 struct usb_host_endpoint *hep = urb->ep; 2058 struct musb_qh *qh; 2059 struct usb_endpoint_descriptor *epd = &hep->desc; 2060 int ret; 2061 unsigned type_reg; 2062 unsigned interval; 2063 2064 /* host role must be active */ 2065 if (!is_host_active(musb) || !musb->is_active) 2066 return -ENODEV; 2067 2068 spin_lock_irqsave(&musb->lock, flags); 2069 ret = usb_hcd_link_urb_to_ep(hcd, urb); 2070 qh = ret ? NULL : hep->hcpriv; 2071 if (qh) 2072 urb->hcpriv = qh; 2073 spin_unlock_irqrestore(&musb->lock, flags); 2074 2075 /* DMA mapping was already done, if needed, and this urb is on 2076 * hep->urb_list now ... so we're done, unless hep wasn't yet 2077 * scheduled onto a live qh. 2078 * 2079 * REVISIT best to keep hep->hcpriv valid until the endpoint gets 2080 * disabled, testing for empty qh->ring and avoiding qh setup costs 2081 * except for the first urb queued after a config change. 2082 */ 2083 if (qh || ret) 2084 return ret; 2085 2086 /* Allocate and initialize qh, minimizing the work done each time 2087 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. 2088 * 2089 * REVISIT consider a dedicated qh kmem_cache, so it's harder 2090 * for bugs in other kernel code to break this driver... 2091 */ 2092 qh = kzalloc(sizeof *qh, mem_flags); 2093 if (!qh) { 2094 spin_lock_irqsave(&musb->lock, flags); 2095 usb_hcd_unlink_urb_from_ep(hcd, urb); 2096 spin_unlock_irqrestore(&musb->lock, flags); 2097 return -ENOMEM; 2098 } 2099 2100 qh->hep = hep; 2101 qh->dev = urb->dev; 2102 INIT_LIST_HEAD(&qh->ring); 2103 qh->is_ready = 1; 2104 2105 qh->maxpacket = usb_endpoint_maxp(epd); 2106 qh->type = usb_endpoint_type(epd); 2107 2108 /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier. 2109 * Some musb cores don't support high bandwidth ISO transfers; and 2110 * we don't (yet!) support high bandwidth interrupt transfers. 2111 */ 2112 qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03); 2113 if (qh->hb_mult > 1) { 2114 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); 2115 2116 if (ok) 2117 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx) 2118 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx); 2119 if (!ok) { 2120 ret = -EMSGSIZE; 2121 goto done; 2122 } 2123 qh->maxpacket &= 0x7ff; 2124 } 2125 2126 qh->epnum = usb_endpoint_num(epd); 2127 2128 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ 2129 qh->addr_reg = (u8) usb_pipedevice(urb->pipe); 2130 2131 /* precompute rxtype/txtype/type0 register */ 2132 type_reg = (qh->type << 4) | qh->epnum; 2133 switch (urb->dev->speed) { 2134 case USB_SPEED_LOW: 2135 type_reg |= 0xc0; 2136 break; 2137 case USB_SPEED_FULL: 2138 type_reg |= 0x80; 2139 break; 2140 default: 2141 type_reg |= 0x40; 2142 } 2143 qh->type_reg = type_reg; 2144 2145 /* Precompute RXINTERVAL/TXINTERVAL register */ 2146 switch (qh->type) { 2147 case USB_ENDPOINT_XFER_INT: 2148 /* 2149 * Full/low speeds use the linear encoding, 2150 * high speed uses the logarithmic encoding. 2151 */ 2152 if (urb->dev->speed <= USB_SPEED_FULL) { 2153 interval = max_t(u8, epd->bInterval, 1); 2154 break; 2155 } 2156 /* FALLTHROUGH */ 2157 case USB_ENDPOINT_XFER_ISOC: 2158 /* ISO always uses logarithmic encoding */ 2159 interval = min_t(u8, epd->bInterval, 16); 2160 break; 2161 default: 2162 /* REVISIT we actually want to use NAK limits, hinting to the 2163 * transfer scheduling logic to try some other qh, e.g. try 2164 * for 2 msec first: 2165 * 2166 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; 2167 * 2168 * The downside of disabling this is that transfer scheduling 2169 * gets VERY unfair for nonperiodic transfers; a misbehaving 2170 * peripheral could make that hurt. That's perfectly normal 2171 * for reads from network or serial adapters ... so we have 2172 * partial NAKlimit support for bulk RX. 2173 * 2174 * The upside of disabling it is simpler transfer scheduling. 2175 */ 2176 interval = 0; 2177 } 2178 qh->intv_reg = interval; 2179 2180 /* precompute addressing for external hub/tt ports */ 2181 if (musb->is_multipoint) { 2182 struct usb_device *parent = urb->dev->parent; 2183 2184 if (parent != hcd->self.root_hub) { 2185 qh->h_addr_reg = (u8) parent->devnum; 2186 2187 /* set up tt info if needed */ 2188 if (urb->dev->tt) { 2189 qh->h_port_reg = (u8) urb->dev->ttport; 2190 if (urb->dev->tt->hub) 2191 qh->h_addr_reg = 2192 (u8) urb->dev->tt->hub->devnum; 2193 if (urb->dev->tt->multi) 2194 qh->h_addr_reg |= 0x80; 2195 } 2196 } 2197 } 2198 2199 /* invariant: hep->hcpriv is null OR the qh that's already scheduled. 2200 * until we get real dma queues (with an entry for each urb/buffer), 2201 * we only have work to do in the former case. 2202 */ 2203 spin_lock_irqsave(&musb->lock, flags); 2204 if (hep->hcpriv || !next_urb(qh)) { 2205 /* some concurrent activity submitted another urb to hep... 2206 * odd, rare, error prone, but legal. 2207 */ 2208 kfree(qh); 2209 qh = NULL; 2210 ret = 0; 2211 } else 2212 ret = musb_schedule(musb, qh, 2213 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); 2214 2215 if (ret == 0) { 2216 urb->hcpriv = qh; 2217 /* FIXME set urb->start_frame for iso/intr, it's tested in 2218 * musb_start_urb(), but otherwise only konicawc cares ... 2219 */ 2220 } 2221 spin_unlock_irqrestore(&musb->lock, flags); 2222 2223done: 2224 if (ret != 0) { 2225 spin_lock_irqsave(&musb->lock, flags); 2226 usb_hcd_unlink_urb_from_ep(hcd, urb); 2227 spin_unlock_irqrestore(&musb->lock, flags); 2228 kfree(qh); 2229 } 2230 return ret; 2231} 2232 2233 2234/* 2235 * abort a transfer that's at the head of a hardware queue. 2236 * called with controller locked, irqs blocked 2237 * that hardware queue advances to the next transfer, unless prevented 2238 */ 2239static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) 2240{ 2241 struct musb_hw_ep *ep = qh->hw_ep; 2242 struct musb *musb = ep->musb; 2243 void __iomem *epio = ep->regs; 2244 unsigned hw_end = ep->epnum; 2245 void __iomem *regs = ep->musb->mregs; 2246 int is_in = usb_pipein(urb->pipe); 2247 int status = 0; 2248 u16 csr; 2249 2250 musb_ep_select(regs, hw_end); 2251 2252 if (is_dma_capable()) { 2253 struct dma_channel *dma; 2254 2255 dma = is_in ? ep->rx_channel : ep->tx_channel; 2256 if (dma) { 2257 status = ep->musb->dma_controller->channel_abort(dma); 2258 dev_dbg(musb->controller, 2259 "abort %cX%d DMA for urb %p --> %d\n", 2260 is_in ? 'R' : 'T', ep->epnum, 2261 urb, status); 2262 urb->actual_length += dma->actual_len; 2263 } 2264 } 2265 2266 /* turn off DMA requests, discard state, stop polling ... */ 2267 if (ep->epnum && is_in) { 2268 /* giveback saves bulk toggle */ 2269 csr = musb_h_flush_rxfifo(ep, 0); 2270 2271 /* REVISIT we still get an irq; should likely clear the 2272 * endpoint's irq status here to avoid bogus irqs. 2273 * clearing that status is platform-specific... 2274 */ 2275 } else if (ep->epnum) { 2276 musb_h_tx_flush_fifo(ep); 2277 csr = musb_readw(epio, MUSB_TXCSR); 2278 csr &= ~(MUSB_TXCSR_AUTOSET 2279 | MUSB_TXCSR_DMAENAB 2280 | MUSB_TXCSR_H_RXSTALL 2281 | MUSB_TXCSR_H_NAKTIMEOUT 2282 | MUSB_TXCSR_H_ERROR 2283 | MUSB_TXCSR_TXPKTRDY); 2284 musb_writew(epio, MUSB_TXCSR, csr); 2285 /* REVISIT may need to clear FLUSHFIFO ... */ 2286 musb_writew(epio, MUSB_TXCSR, csr); 2287 /* flush cpu writebuffer */ 2288 csr = musb_readw(epio, MUSB_TXCSR); 2289 } else { 2290 musb_h_ep0_flush_fifo(ep); 2291 } 2292 if (status == 0) 2293 musb_advance_schedule(ep->musb, urb, ep, is_in); 2294 return status; 2295} 2296 2297static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) 2298{ 2299 struct musb *musb = hcd_to_musb(hcd); 2300 struct musb_qh *qh; 2301 unsigned long flags; 2302 int is_in = usb_pipein(urb->pipe); 2303 int ret; 2304 2305 dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb, 2306 usb_pipedevice(urb->pipe), 2307 usb_pipeendpoint(urb->pipe), 2308 is_in ? "in" : "out"); 2309 2310 spin_lock_irqsave(&musb->lock, flags); 2311 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 2312 if (ret) 2313 goto done; 2314 2315 qh = urb->hcpriv; 2316 if (!qh) 2317 goto done; 2318 2319 /* 2320 * Any URB not actively programmed into endpoint hardware can be 2321 * immediately given back; that's any URB not at the head of an 2322 * endpoint queue, unless someday we get real DMA queues. And even 2323 * if it's at the head, it might not be known to the hardware... 2324 * 2325 * Otherwise abort current transfer, pending DMA, etc.; urb->status 2326 * has already been updated. This is a synchronous abort; it'd be 2327 * OK to hold off until after some IRQ, though. 2328 * 2329 * NOTE: qh is invalid unless !list_empty(&hep->urb_list) 2330 */ 2331 if (!qh->is_ready 2332 || urb->urb_list.prev != &qh->hep->urb_list 2333 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) { 2334 int ready = qh->is_ready; 2335 2336 qh->is_ready = 0; 2337 musb_giveback(musb, urb, 0); 2338 qh->is_ready = ready; 2339 2340 /* If nothing else (usually musb_giveback) is using it 2341 * and its URB list has emptied, recycle this qh. 2342 */ 2343 if (ready && list_empty(&qh->hep->urb_list)) { 2344 qh->hep->hcpriv = NULL; 2345 list_del(&qh->ring); 2346 kfree(qh); 2347 } 2348 } else 2349 ret = musb_cleanup_urb(urb, qh); 2350done: 2351 spin_unlock_irqrestore(&musb->lock, flags); 2352 return ret; 2353} 2354 2355/* disable an endpoint */ 2356static void 2357musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) 2358{ 2359 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN; 2360 unsigned long flags; 2361 struct musb *musb = hcd_to_musb(hcd); 2362 struct musb_qh *qh; 2363 struct urb *urb; 2364 2365 spin_lock_irqsave(&musb->lock, flags); 2366 2367 qh = hep->hcpriv; 2368 if (qh == NULL) 2369 goto exit; 2370 2371 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ 2372 2373 /* Kick the first URB off the hardware, if needed */ 2374 qh->is_ready = 0; 2375 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) { 2376 urb = next_urb(qh); 2377 2378 /* make software (then hardware) stop ASAP */ 2379 if (!urb->unlinked) 2380 urb->status = -ESHUTDOWN; 2381 2382 /* cleanup */ 2383 musb_cleanup_urb(urb, qh); 2384 2385 /* Then nuke all the others ... and advance the 2386 * queue on hw_ep (e.g. bulk ring) when we're done. 2387 */ 2388 while (!list_empty(&hep->urb_list)) { 2389 urb = next_urb(qh); 2390 urb->status = -ESHUTDOWN; 2391 musb_advance_schedule(musb, urb, qh->hw_ep, is_in); 2392 } 2393 } else { 2394 /* Just empty the queue; the hardware is busy with 2395 * other transfers, and since !qh->is_ready nothing 2396 * will activate any of these as it advances. 2397 */ 2398 while (!list_empty(&hep->urb_list)) 2399 musb_giveback(musb, next_urb(qh), -ESHUTDOWN); 2400 2401 hep->hcpriv = NULL; 2402 list_del(&qh->ring); 2403 kfree(qh); 2404 } 2405exit: 2406 spin_unlock_irqrestore(&musb->lock, flags); 2407} 2408 2409static int musb_h_get_frame_number(struct usb_hcd *hcd) 2410{ 2411 struct musb *musb = hcd_to_musb(hcd); 2412 2413 return musb_readw(musb->mregs, MUSB_FRAME); 2414} 2415 2416static int musb_h_start(struct usb_hcd *hcd) 2417{ 2418 struct musb *musb = hcd_to_musb(hcd); 2419 2420 /* NOTE: musb_start() is called when the hub driver turns 2421 * on port power, or when (OTG) peripheral starts. 2422 */ 2423 hcd->state = HC_STATE_RUNNING; 2424 musb->port1_status = 0; 2425 return 0; 2426} 2427 2428static void musb_h_stop(struct usb_hcd *hcd) 2429{ 2430 musb_stop(hcd_to_musb(hcd)); 2431 hcd->state = HC_STATE_HALT; 2432} 2433 2434static int musb_bus_suspend(struct usb_hcd *hcd) 2435{ 2436 struct musb *musb = hcd_to_musb(hcd); 2437 u8 devctl; 2438 2439 if (!is_host_active(musb)) 2440 return 0; 2441 2442 switch (musb->xceiv->state) { 2443 case OTG_STATE_A_SUSPEND: 2444 return 0; 2445 case OTG_STATE_A_WAIT_VRISE: 2446 /* ID could be grounded even if there's no device 2447 * on the other end of the cable. NOTE that the 2448 * A_WAIT_VRISE timers are messy with MUSB... 2449 */ 2450 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 2451 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 2452 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 2453 break; 2454 default: 2455 break; 2456 } 2457 2458 if (musb->is_active) { 2459 WARNING("trying to suspend as %s while active\n", 2460 usb_otg_state_string(musb->xceiv->state)); 2461 return -EBUSY; 2462 } else 2463 return 0; 2464} 2465 2466static int musb_bus_resume(struct usb_hcd *hcd) 2467{ 2468 /* resuming child port does the work */ 2469 return 0; 2470} 2471 2472#ifndef CONFIG_MUSB_PIO_ONLY 2473 2474#define MUSB_USB_DMA_ALIGN 4 2475 2476struct musb_temp_buffer { 2477 void *kmalloc_ptr; 2478 void *old_xfer_buffer; 2479 u8 data[0]; 2480}; 2481 2482static void musb_free_temp_buffer(struct urb *urb) 2483{ 2484 enum dma_data_direction dir; 2485 struct musb_temp_buffer *temp; 2486 2487 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) 2488 return; 2489 2490 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 2491 2492 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer, 2493 data); 2494 2495 if (dir == DMA_FROM_DEVICE) { 2496 memcpy(temp->old_xfer_buffer, temp->data, 2497 urb->transfer_buffer_length); 2498 } 2499 urb->transfer_buffer = temp->old_xfer_buffer; 2500 kfree(temp->kmalloc_ptr); 2501 2502 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; 2503} 2504 2505static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) 2506{ 2507 enum dma_data_direction dir; 2508 struct musb_temp_buffer *temp; 2509 void *kmalloc_ptr; 2510 size_t kmalloc_size; 2511 2512 if (urb->num_sgs || urb->sg || 2513 urb->transfer_buffer_length == 0 || 2514 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1))) 2515 return 0; 2516 2517 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 2518 2519 /* Allocate a buffer with enough padding for alignment */ 2520 kmalloc_size = urb->transfer_buffer_length + 2521 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1; 2522 2523 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); 2524 if (!kmalloc_ptr) 2525 return -ENOMEM; 2526 2527 /* Position our struct temp_buffer such that data is aligned */ 2528 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN); 2529 2530 2531 temp->kmalloc_ptr = kmalloc_ptr; 2532 temp->old_xfer_buffer = urb->transfer_buffer; 2533 if (dir == DMA_TO_DEVICE) 2534 memcpy(temp->data, urb->transfer_buffer, 2535 urb->transfer_buffer_length); 2536 urb->transfer_buffer = temp->data; 2537 2538 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; 2539 2540 return 0; 2541} 2542 2543static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, 2544 gfp_t mem_flags) 2545{ 2546 struct musb *musb = hcd_to_musb(hcd); 2547 int ret; 2548 2549 /* 2550 * The DMA engine in RTL1.8 and above cannot handle 2551 * DMA addresses that are not aligned to a 4 byte boundary. 2552 * For such engine implemented (un)map_urb_for_dma hooks. 2553 * Do not use these hooks for RTL<1.8 2554 */ 2555 if (musb->hwvers < MUSB_HWVERS_1800) 2556 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); 2557 2558 ret = musb_alloc_temp_buffer(urb, mem_flags); 2559 if (ret) 2560 return ret; 2561 2562 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); 2563 if (ret) 2564 musb_free_temp_buffer(urb); 2565 2566 return ret; 2567} 2568 2569static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) 2570{ 2571 struct musb *musb = hcd_to_musb(hcd); 2572 2573 usb_hcd_unmap_urb_for_dma(hcd, urb); 2574 2575 /* Do not use this hook for RTL<1.8 (see description above) */ 2576 if (musb->hwvers < MUSB_HWVERS_1800) 2577 return; 2578 2579 musb_free_temp_buffer(urb); 2580} 2581#endif /* !CONFIG_MUSB_PIO_ONLY */ 2582 2583static const struct hc_driver musb_hc_driver = { 2584 .description = "musb-hcd", 2585 .product_desc = "MUSB HDRC host driver", 2586 .hcd_priv_size = sizeof(struct musb *), 2587 .flags = HCD_USB2 | HCD_MEMORY, 2588 2589 /* not using irq handler or reset hooks from usbcore, since 2590 * those must be shared with peripheral code for OTG configs 2591 */ 2592 2593 .start = musb_h_start, 2594 .stop = musb_h_stop, 2595 2596 .get_frame_number = musb_h_get_frame_number, 2597 2598 .urb_enqueue = musb_urb_enqueue, 2599 .urb_dequeue = musb_urb_dequeue, 2600 .endpoint_disable = musb_h_disable, 2601 2602#ifndef CONFIG_MUSB_PIO_ONLY 2603 .map_urb_for_dma = musb_map_urb_for_dma, 2604 .unmap_urb_for_dma = musb_unmap_urb_for_dma, 2605#endif 2606 2607 .hub_status_data = musb_hub_status_data, 2608 .hub_control = musb_hub_control, 2609 .bus_suspend = musb_bus_suspend, 2610 .bus_resume = musb_bus_resume, 2611 /* .start_port_reset = NULL, */ 2612 /* .hub_irq_enable = NULL, */ 2613}; 2614 2615int musb_host_alloc(struct musb *musb) 2616{ 2617 struct device *dev = musb->controller; 2618 2619 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ 2620 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev)); 2621 if (!musb->hcd) 2622 return -EINVAL; 2623 2624 *musb->hcd->hcd_priv = (unsigned long) musb; 2625 musb->hcd->self.uses_pio_for_control = 1; 2626 musb->hcd->uses_new_polling = 1; 2627 musb->hcd->has_tt = 1; 2628 2629 return 0; 2630} 2631 2632void musb_host_cleanup(struct musb *musb) 2633{ 2634 usb_remove_hcd(musb->hcd); 2635 musb->hcd = NULL; 2636} 2637 2638void musb_host_free(struct musb *musb) 2639{ 2640 usb_put_hcd(musb->hcd); 2641} 2642 2643int musb_host_setup(struct musb *musb, int power_budget) 2644{ 2645 int ret; 2646 struct usb_hcd *hcd = musb->hcd; 2647 2648 MUSB_HST_MODE(musb); 2649 musb->xceiv->otg->default_a = 1; 2650 musb->xceiv->state = OTG_STATE_A_IDLE; 2651 2652 otg_set_host(musb->xceiv->otg, &hcd->self); 2653 hcd->self.otg_port = 1; 2654 musb->xceiv->otg->host = &hcd->self; 2655 hcd->power_budget = 2 * (power_budget ? : 250); 2656 2657 ret = usb_add_hcd(hcd, 0, 0); 2658 if (ret < 0) 2659 return ret; 2660 2661 return 0; 2662} 2663 2664void musb_host_resume_root_hub(struct musb *musb) 2665{ 2666 usb_hcd_resume_root_hub(musb->hcd); 2667} 2668 2669void musb_host_poke_root_hub(struct musb *musb) 2670{ 2671 MUSB_HST_MODE(musb); 2672 if (musb->hcd->status_urb) 2673 usb_hcd_poll_rh_status(musb->hcd); 2674 else 2675 usb_hcd_resume_root_hub(musb->hcd); 2676} 2677