ehci-q.c revision 914b701280a76f96890ad63eb0fa99bf204b961c
1/* 2 * Copyright (C) 2001-2004 by David Brownell 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License as published by the 6 * Free Software Foundation; either version 2 of the License, or (at your 7 * option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, but 10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software Foundation, 16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 */ 18 19/* this file is part of ehci-hcd.c */ 20 21/*-------------------------------------------------------------------------*/ 22 23/* 24 * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. 25 * 26 * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" 27 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned 28 * buffers needed for the larger number). We use one QH per endpoint, queue 29 * multiple urbs (all three types) per endpoint. URBs may need several qtds. 30 * 31 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with 32 * interrupts) needs careful scheduling. Performance improvements can be 33 * an ongoing challenge. That's in "ehci-sched.c". 34 * 35 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, 36 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using 37 * (b) special fields in qh entries or (c) split iso entries. TTs will 38 * buffer low/full speed data so the host collects it at high speed. 39 */ 40 41/*-------------------------------------------------------------------------*/ 42 43/* fill a qtd, returning how much of the buffer we were able to queue up */ 44 45static int 46qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf, 47 size_t len, int token, int maxpacket) 48{ 49 int i, count; 50 u64 addr = buf; 51 52 /* one buffer entry per 4K ... first might be short or unaligned */ 53 qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr); 54 qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32)); 55 count = 0x1000 - (buf & 0x0fff); /* rest of that page */ 56 if (likely (len < count)) /* ... iff needed */ 57 count = len; 58 else { 59 buf += 0x1000; 60 buf &= ~0x0fff; 61 62 /* per-qtd limit: from 16K to 20K (best alignment) */ 63 for (i = 1; count < len && i < 5; i++) { 64 addr = buf; 65 qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr); 66 qtd->hw_buf_hi[i] = cpu_to_hc32(ehci, 67 (u32)(addr >> 32)); 68 buf += 0x1000; 69 if ((count + 0x1000) < len) 70 count += 0x1000; 71 else 72 count = len; 73 } 74 75 /* short packets may only terminate transfers */ 76 if (count != len) 77 count -= (count % maxpacket); 78 } 79 qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token); 80 qtd->length = count; 81 82 return count; 83} 84 85/*-------------------------------------------------------------------------*/ 86 87static inline void 88qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) 89{ 90 /* writes to an active overlay are unsafe */ 91 BUG_ON(qh->qh_state != QH_STATE_IDLE); 92 93 qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); 94 qh->hw_alt_next = EHCI_LIST_END(ehci); 95 96 /* Except for control endpoints, we make hardware maintain data 97 * toggle (like OHCI) ... here (re)initialize the toggle in the QH, 98 * and set the pseudo-toggle in udev. Only usb_clear_halt() will 99 * ever clear it. 100 */ 101 if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { 102 unsigned is_out, epnum; 103 104 is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8)); 105 epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f; 106 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { 107 qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); 108 usb_settoggle (qh->dev, epnum, is_out, 1); 109 } 110 } 111 112 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ 113 wmb (); 114 qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); 115} 116 117/* if it weren't for a common silicon quirk (writing the dummy into the qh 118 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault 119 * recovery (including urb dequeue) would need software changes to a QH... 120 */ 121static void 122qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) 123{ 124 struct ehci_qtd *qtd; 125 126 if (list_empty (&qh->qtd_list)) 127 qtd = qh->dummy; 128 else { 129 qtd = list_entry (qh->qtd_list.next, 130 struct ehci_qtd, qtd_list); 131 /* first qtd may already be partially processed */ 132 if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current) 133 qtd = NULL; 134 } 135 136 if (qtd) 137 qh_update (ehci, qh, qtd); 138} 139 140/*-------------------------------------------------------------------------*/ 141 142static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh); 143 144static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd, 145 struct usb_host_endpoint *ep) 146{ 147 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 148 struct ehci_qh *qh = ep->hcpriv; 149 unsigned long flags; 150 151 spin_lock_irqsave(&ehci->lock, flags); 152 qh->clearing_tt = 0; 153 if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) 154 && HC_IS_RUNNING(hcd->state)) 155 qh_link_async(ehci, qh); 156 spin_unlock_irqrestore(&ehci->lock, flags); 157} 158 159static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh, 160 struct urb *urb, u32 token) 161{ 162 163 /* If an async split transaction gets an error or is unlinked, 164 * the TT buffer may be left in an indeterminate state. We 165 * have to clear the TT buffer. 166 * 167 * Note: this routine is never called for Isochronous transfers. 168 */ 169 if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { 170#ifdef DEBUG 171 struct usb_device *tt = urb->dev->tt->hub; 172 dev_dbg(&tt->dev, 173 "clear tt buffer port %d, a%d ep%d t%08x\n", 174 urb->dev->ttport, urb->dev->devnum, 175 usb_pipeendpoint(urb->pipe), token); 176#endif /* DEBUG */ 177 if (!ehci_is_TDI(ehci) 178 || urb->dev->tt->hub != 179 ehci_to_hcd(ehci)->self.root_hub) { 180 if (usb_hub_clear_tt_buffer(urb) == 0) 181 qh->clearing_tt = 1; 182 } else { 183 184 /* REVISIT ARC-derived cores don't clear the root 185 * hub TT buffer in this way... 186 */ 187 } 188 } 189} 190 191static int qtd_copy_status ( 192 struct ehci_hcd *ehci, 193 struct urb *urb, 194 size_t length, 195 u32 token 196) 197{ 198 int status = -EINPROGRESS; 199 200 /* count IN/OUT bytes, not SETUP (even short packets) */ 201 if (likely (QTD_PID (token) != 2)) 202 urb->actual_length += length - QTD_LENGTH (token); 203 204 /* don't modify error codes */ 205 if (unlikely(urb->unlinked)) 206 return status; 207 208 /* force cleanup after short read; not always an error */ 209 if (unlikely (IS_SHORT_READ (token))) 210 status = -EREMOTEIO; 211 212 /* serious "can't proceed" faults reported by the hardware */ 213 if (token & QTD_STS_HALT) { 214 if (token & QTD_STS_BABBLE) { 215 /* FIXME "must" disable babbling device's port too */ 216 status = -EOVERFLOW; 217 } else if (token & QTD_STS_MMF) { 218 /* fs/ls interrupt xfer missed the complete-split */ 219 status = -EPROTO; 220 } else if (token & QTD_STS_DBE) { 221 status = (QTD_PID (token) == 1) /* IN ? */ 222 ? -ENOSR /* hc couldn't read data */ 223 : -ECOMM; /* hc couldn't write data */ 224 } else if (token & QTD_STS_XACT) { 225 /* timeout, bad crc, wrong PID, etc; retried */ 226 if (QTD_CERR (token)) 227 status = -EPIPE; 228 else { 229 ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n", 230 urb->dev->devpath, 231 usb_pipeendpoint (urb->pipe), 232 usb_pipein (urb->pipe) ? "in" : "out"); 233 status = -EPROTO; 234 } 235 /* CERR nonzero + no errors + halt --> stall */ 236 } else if (QTD_CERR (token)) 237 status = -EPIPE; 238 else /* unknown */ 239 status = -EPROTO; 240 241 ehci_vdbg (ehci, 242 "dev%d ep%d%s qtd token %08x --> status %d\n", 243 usb_pipedevice (urb->pipe), 244 usb_pipeendpoint (urb->pipe), 245 usb_pipein (urb->pipe) ? "in" : "out", 246 token, status); 247 } 248 249 return status; 250} 251 252static void 253ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) 254__releases(ehci->lock) 255__acquires(ehci->lock) 256{ 257 if (likely (urb->hcpriv != NULL)) { 258 struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; 259 260 /* S-mask in a QH means it's an interrupt urb */ 261 if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { 262 263 /* ... update hc-wide periodic stats (for usbfs) */ 264 ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; 265 } 266 qh_put (qh); 267 } 268 269 if (unlikely(urb->unlinked)) { 270 COUNT(ehci->stats.unlink); 271 } else { 272 /* report non-error and short read status as zero */ 273 if (status == -EINPROGRESS || status == -EREMOTEIO) 274 status = 0; 275 COUNT(ehci->stats.complete); 276 } 277 278#ifdef EHCI_URB_TRACE 279 ehci_dbg (ehci, 280 "%s %s urb %p ep%d%s status %d len %d/%d\n", 281 __func__, urb->dev->devpath, urb, 282 usb_pipeendpoint (urb->pipe), 283 usb_pipein (urb->pipe) ? "in" : "out", 284 status, 285 urb->actual_length, urb->transfer_buffer_length); 286#endif 287 288 /* complete() can reenter this HCD */ 289 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 290 spin_unlock (&ehci->lock); 291 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); 292 spin_lock (&ehci->lock); 293} 294 295static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); 296static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); 297 298static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh); 299static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); 300 301/* 302 * Process and free completed qtds for a qh, returning URBs to drivers. 303 * Chases up to qh->hw_current. Returns number of completions called, 304 * indicating how much "real" work we did. 305 */ 306static unsigned 307qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) 308{ 309 struct ehci_qtd *last = NULL, *end = qh->dummy; 310 struct list_head *entry, *tmp; 311 int last_status = -EINPROGRESS; 312 int stopped; 313 unsigned count = 0; 314 u8 state; 315 __le32 halt = HALT_BIT(ehci); 316 317 if (unlikely (list_empty (&qh->qtd_list))) 318 return count; 319 320 /* completions (or tasks on other cpus) must never clobber HALT 321 * till we've gone through and cleaned everything up, even when 322 * they add urbs to this qh's queue or mark them for unlinking. 323 * 324 * NOTE: unlinking expects to be done in queue order. 325 */ 326 state = qh->qh_state; 327 qh->qh_state = QH_STATE_COMPLETING; 328 stopped = (state == QH_STATE_IDLE); 329 330 /* remove de-activated QTDs from front of queue. 331 * after faults (including short reads), cleanup this urb 332 * then let the queue advance. 333 * if queue is stopped, handles unlinks. 334 */ 335 list_for_each_safe (entry, tmp, &qh->qtd_list) { 336 struct ehci_qtd *qtd; 337 struct urb *urb; 338 u32 token = 0; 339 340 qtd = list_entry (entry, struct ehci_qtd, qtd_list); 341 urb = qtd->urb; 342 343 /* clean up any state from previous QTD ...*/ 344 if (last) { 345 if (likely (last->urb != urb)) { 346 ehci_urb_done(ehci, last->urb, last_status); 347 count++; 348 last_status = -EINPROGRESS; 349 } 350 ehci_qtd_free (ehci, last); 351 last = NULL; 352 } 353 354 /* ignore urbs submitted during completions we reported */ 355 if (qtd == end) 356 break; 357 358 /* hardware copies qtd out of qh overlay */ 359 rmb (); 360 token = hc32_to_cpu(ehci, qtd->hw_token); 361 362 /* always clean up qtds the hc de-activated */ 363 retry_xacterr: 364 if ((token & QTD_STS_ACTIVE) == 0) { 365 366 /* on STALL, error, and short reads this urb must 367 * complete and all its qtds must be recycled. 368 */ 369 if ((token & QTD_STS_HALT) != 0) { 370 371 /* retry transaction errors until we 372 * reach the software xacterr limit 373 */ 374 if ((token & QTD_STS_XACT) && 375 QTD_CERR(token) == 0 && 376 --qh->xacterrs > 0 && 377 !urb->unlinked) { 378 ehci_dbg(ehci, 379 "detected XactErr len %zu/%zu retry %d\n", 380 qtd->length - QTD_LENGTH(token), qtd->length, 381 QH_XACTERR_MAX - qh->xacterrs); 382 383 /* reset the token in the qtd and the 384 * qh overlay (which still contains 385 * the qtd) so that we pick up from 386 * where we left off 387 */ 388 token &= ~QTD_STS_HALT; 389 token |= QTD_STS_ACTIVE | 390 (EHCI_TUNE_CERR << 10); 391 qtd->hw_token = cpu_to_hc32(ehci, 392 token); 393 wmb(); 394 qh->hw_token = cpu_to_hc32(ehci, token); 395 goto retry_xacterr; 396 } 397 stopped = 1; 398 399 /* magic dummy for some short reads; qh won't advance. 400 * that silicon quirk can kick in with this dummy too. 401 * 402 * other short reads won't stop the queue, including 403 * control transfers (status stage handles that) or 404 * most other single-qtd reads ... the queue stops if 405 * URB_SHORT_NOT_OK was set so the driver submitting 406 * the urbs could clean it up. 407 */ 408 } else if (IS_SHORT_READ (token) 409 && !(qtd->hw_alt_next 410 & EHCI_LIST_END(ehci))) { 411 stopped = 1; 412 goto halt; 413 } 414 415 /* stop scanning when we reach qtds the hc is using */ 416 } else if (likely (!stopped 417 && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) { 418 break; 419 420 /* scan the whole queue for unlinks whenever it stops */ 421 } else { 422 stopped = 1; 423 424 /* cancel everything if we halt, suspend, etc */ 425 if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) 426 last_status = -ESHUTDOWN; 427 428 /* this qtd is active; skip it unless a previous qtd 429 * for its urb faulted, or its urb was canceled. 430 */ 431 else if (last_status == -EINPROGRESS && !urb->unlinked) 432 continue; 433 434 /* qh unlinked; token in overlay may be most current */ 435 if (state == QH_STATE_IDLE 436 && cpu_to_hc32(ehci, qtd->qtd_dma) 437 == qh->hw_current) { 438 token = hc32_to_cpu(ehci, qh->hw_token); 439 440 /* An unlink may leave an incomplete 441 * async transaction in the TT buffer. 442 * We have to clear it. 443 */ 444 ehci_clear_tt_buffer(ehci, qh, urb, token); 445 } 446 447 /* force halt for unlinked or blocked qh, so we'll 448 * patch the qh later and so that completions can't 449 * activate it while we "know" it's stopped. 450 */ 451 if ((halt & qh->hw_token) == 0) { 452halt: 453 qh->hw_token |= halt; 454 wmb (); 455 } 456 } 457 458 /* unless we already know the urb's status, collect qtd status 459 * and update count of bytes transferred. in common short read 460 * cases with only one data qtd (including control transfers), 461 * queue processing won't halt. but with two or more qtds (for 462 * example, with a 32 KB transfer), when the first qtd gets a 463 * short read the second must be removed by hand. 464 */ 465 if (last_status == -EINPROGRESS) { 466 last_status = qtd_copy_status(ehci, urb, 467 qtd->length, token); 468 if (last_status == -EREMOTEIO 469 && (qtd->hw_alt_next 470 & EHCI_LIST_END(ehci))) 471 last_status = -EINPROGRESS; 472 473 /* As part of low/full-speed endpoint-halt processing 474 * we must clear the TT buffer (11.17.5). 475 */ 476 if (unlikely(last_status != -EINPROGRESS && 477 last_status != -EREMOTEIO)) 478 ehci_clear_tt_buffer(ehci, qh, urb, token); 479 } 480 481 /* if we're removing something not at the queue head, 482 * patch the hardware queue pointer. 483 */ 484 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { 485 last = list_entry (qtd->qtd_list.prev, 486 struct ehci_qtd, qtd_list); 487 last->hw_next = qtd->hw_next; 488 } 489 490 /* remove qtd; it's recycled after possible urb completion */ 491 list_del (&qtd->qtd_list); 492 last = qtd; 493 494 /* reinit the xacterr counter for the next qtd */ 495 qh->xacterrs = QH_XACTERR_MAX; 496 } 497 498 /* last urb's completion might still need calling */ 499 if (likely (last != NULL)) { 500 ehci_urb_done(ehci, last->urb, last_status); 501 count++; 502 ehci_qtd_free (ehci, last); 503 } 504 505 /* restore original state; caller must unlink or relink */ 506 qh->qh_state = state; 507 508 /* be sure the hardware's done with the qh before refreshing 509 * it after fault cleanup, or recovering from silicon wrongly 510 * overlaying the dummy qtd (which reduces DMA chatter). 511 */ 512 if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) { 513 switch (state) { 514 case QH_STATE_IDLE: 515 qh_refresh(ehci, qh); 516 break; 517 case QH_STATE_LINKED: 518 /* We won't refresh a QH that's linked (after the HC 519 * stopped the queue). That avoids a race: 520 * - HC reads first part of QH; 521 * - CPU updates that first part and the token; 522 * - HC reads rest of that QH, including token 523 * Result: HC gets an inconsistent image, and then 524 * DMAs to/from the wrong memory (corrupting it). 525 * 526 * That should be rare for interrupt transfers, 527 * except maybe high bandwidth ... 528 */ 529 if ((cpu_to_hc32(ehci, QH_SMASK) 530 & qh->hw_info2) != 0) { 531 intr_deschedule (ehci, qh); 532 (void) qh_schedule (ehci, qh); 533 } else 534 unlink_async (ehci, qh); 535 break; 536 /* otherwise, unlink already started */ 537 } 538 } 539 540 return count; 541} 542 543/*-------------------------------------------------------------------------*/ 544 545// high bandwidth multiplier, as encoded in highspeed endpoint descriptors 546#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) 547// ... and packet size, for any kind of endpoint descriptor 548#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) 549 550/* 551 * reverse of qh_urb_transaction: free a list of TDs. 552 * used for cleanup after errors, before HC sees an URB's TDs. 553 */ 554static void qtd_list_free ( 555 struct ehci_hcd *ehci, 556 struct urb *urb, 557 struct list_head *qtd_list 558) { 559 struct list_head *entry, *temp; 560 561 list_for_each_safe (entry, temp, qtd_list) { 562 struct ehci_qtd *qtd; 563 564 qtd = list_entry (entry, struct ehci_qtd, qtd_list); 565 list_del (&qtd->qtd_list); 566 ehci_qtd_free (ehci, qtd); 567 } 568} 569 570/* 571 * create a list of filled qtds for this URB; won't link into qh. 572 */ 573static struct list_head * 574qh_urb_transaction ( 575 struct ehci_hcd *ehci, 576 struct urb *urb, 577 struct list_head *head, 578 gfp_t flags 579) { 580 struct ehci_qtd *qtd, *qtd_prev; 581 dma_addr_t buf; 582 int len, maxpacket; 583 int is_input; 584 u32 token; 585 586 /* 587 * URBs map to sequences of QTDs: one logical transaction 588 */ 589 qtd = ehci_qtd_alloc (ehci, flags); 590 if (unlikely (!qtd)) 591 return NULL; 592 list_add_tail (&qtd->qtd_list, head); 593 qtd->urb = urb; 594 595 token = QTD_STS_ACTIVE; 596 token |= (EHCI_TUNE_CERR << 10); 597 /* for split transactions, SplitXState initialized to zero */ 598 599 len = urb->transfer_buffer_length; 600 is_input = usb_pipein (urb->pipe); 601 if (usb_pipecontrol (urb->pipe)) { 602 /* SETUP pid */ 603 qtd_fill(ehci, qtd, urb->setup_dma, 604 sizeof (struct usb_ctrlrequest), 605 token | (2 /* "setup" */ << 8), 8); 606 607 /* ... and always at least one more pid */ 608 token ^= QTD_TOGGLE; 609 qtd_prev = qtd; 610 qtd = ehci_qtd_alloc (ehci, flags); 611 if (unlikely (!qtd)) 612 goto cleanup; 613 qtd->urb = urb; 614 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); 615 list_add_tail (&qtd->qtd_list, head); 616 617 /* for zero length DATA stages, STATUS is always IN */ 618 if (len == 0) 619 token |= (1 /* "in" */ << 8); 620 } 621 622 /* 623 * data transfer stage: buffer setup 624 */ 625 buf = urb->transfer_dma; 626 627 if (is_input) 628 token |= (1 /* "in" */ << 8); 629 /* else it's already initted to "out" pid (0 << 8) */ 630 631 maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); 632 633 /* 634 * buffer gets wrapped in one or more qtds; 635 * last one may be "short" (including zero len) 636 * and may serve as a control status ack 637 */ 638 for (;;) { 639 int this_qtd_len; 640 641 this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket); 642 len -= this_qtd_len; 643 buf += this_qtd_len; 644 645 /* 646 * short reads advance to a "magic" dummy instead of the next 647 * qtd ... that forces the queue to stop, for manual cleanup. 648 * (this will usually be overridden later.) 649 */ 650 if (is_input) 651 qtd->hw_alt_next = ehci->async->hw_alt_next; 652 653 /* qh makes control packets use qtd toggle; maybe switch it */ 654 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) 655 token ^= QTD_TOGGLE; 656 657 if (likely (len <= 0)) 658 break; 659 660 qtd_prev = qtd; 661 qtd = ehci_qtd_alloc (ehci, flags); 662 if (unlikely (!qtd)) 663 goto cleanup; 664 qtd->urb = urb; 665 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); 666 list_add_tail (&qtd->qtd_list, head); 667 } 668 669 /* 670 * unless the caller requires manual cleanup after short reads, 671 * have the alt_next mechanism keep the queue running after the 672 * last data qtd (the only one, for control and most other cases). 673 */ 674 if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 675 || usb_pipecontrol (urb->pipe))) 676 qtd->hw_alt_next = EHCI_LIST_END(ehci); 677 678 /* 679 * control requests may need a terminating data "status" ack; 680 * bulk ones may need a terminating short packet (zero length). 681 */ 682 if (likely (urb->transfer_buffer_length != 0)) { 683 int one_more = 0; 684 685 if (usb_pipecontrol (urb->pipe)) { 686 one_more = 1; 687 token ^= 0x0100; /* "in" <--> "out" */ 688 token |= QTD_TOGGLE; /* force DATA1 */ 689 } else if (usb_pipebulk (urb->pipe) 690 && (urb->transfer_flags & URB_ZERO_PACKET) 691 && !(urb->transfer_buffer_length % maxpacket)) { 692 one_more = 1; 693 } 694 if (one_more) { 695 qtd_prev = qtd; 696 qtd = ehci_qtd_alloc (ehci, flags); 697 if (unlikely (!qtd)) 698 goto cleanup; 699 qtd->urb = urb; 700 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); 701 list_add_tail (&qtd->qtd_list, head); 702 703 /* never any data in such packets */ 704 qtd_fill(ehci, qtd, 0, 0, token, 0); 705 } 706 } 707 708 /* by default, enable interrupt on urb completion */ 709 if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) 710 qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); 711 return head; 712 713cleanup: 714 qtd_list_free (ehci, urb, head); 715 return NULL; 716} 717 718/*-------------------------------------------------------------------------*/ 719 720// Would be best to create all qh's from config descriptors, 721// when each interface/altsetting is established. Unlink 722// any previous qh and cancel its urbs first; endpoints are 723// implicitly reset then (data toggle too). 724// That'd mean updating how usbcore talks to HCDs. (2.7?) 725 726 727/* 728 * Each QH holds a qtd list; a QH is used for everything except iso. 729 * 730 * For interrupt urbs, the scheduler must set the microframe scheduling 731 * mask(s) each time the QH gets scheduled. For highspeed, that's 732 * just one microframe in the s-mask. For split interrupt transactions 733 * there are additional complications: c-mask, maybe FSTNs. 734 */ 735static struct ehci_qh * 736qh_make ( 737 struct ehci_hcd *ehci, 738 struct urb *urb, 739 gfp_t flags 740) { 741 struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); 742 u32 info1 = 0, info2 = 0; 743 int is_input, type; 744 int maxp = 0; 745 struct usb_tt *tt = urb->dev->tt; 746 747 if (!qh) 748 return qh; 749 750 /* 751 * init endpoint/device data for this QH 752 */ 753 info1 |= usb_pipeendpoint (urb->pipe) << 8; 754 info1 |= usb_pipedevice (urb->pipe) << 0; 755 756 is_input = usb_pipein (urb->pipe); 757 type = usb_pipetype (urb->pipe); 758 maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input); 759 760 /* 1024 byte maxpacket is a hardware ceiling. High bandwidth 761 * acts like up to 3KB, but is built from smaller packets. 762 */ 763 if (max_packet(maxp) > 1024) { 764 ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp)); 765 goto done; 766 } 767 768 /* Compute interrupt scheduling parameters just once, and save. 769 * - allowing for high bandwidth, how many nsec/uframe are used? 770 * - split transactions need a second CSPLIT uframe; same question 771 * - splits also need a schedule gap (for full/low speed I/O) 772 * - qh has a polling interval 773 * 774 * For control/bulk requests, the HC or TT handles these. 775 */ 776 if (type == PIPE_INTERRUPT) { 777 qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, 778 is_input, 0, 779 hb_mult(maxp) * max_packet(maxp))); 780 qh->start = NO_FRAME; 781 782 if (urb->dev->speed == USB_SPEED_HIGH) { 783 qh->c_usecs = 0; 784 qh->gap_uf = 0; 785 786 qh->period = urb->interval >> 3; 787 if (qh->period == 0 && urb->interval != 1) { 788 /* NOTE interval 2 or 4 uframes could work. 789 * But interval 1 scheduling is simpler, and 790 * includes high bandwidth. 791 */ 792 dbg ("intr period %d uframes, NYET!", 793 urb->interval); 794 goto done; 795 } 796 } else { 797 int think_time; 798 799 /* gap is f(FS/LS transfer times) */ 800 qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, 801 is_input, 0, maxp) / (125 * 1000); 802 803 /* FIXME this just approximates SPLIT/CSPLIT times */ 804 if (is_input) { // SPLIT, gap, CSPLIT+DATA 805 qh->c_usecs = qh->usecs + HS_USECS (0); 806 qh->usecs = HS_USECS (1); 807 } else { // SPLIT+DATA, gap, CSPLIT 808 qh->usecs += HS_USECS (1); 809 qh->c_usecs = HS_USECS (0); 810 } 811 812 think_time = tt ? tt->think_time : 0; 813 qh->tt_usecs = NS_TO_US (think_time + 814 usb_calc_bus_time (urb->dev->speed, 815 is_input, 0, max_packet (maxp))); 816 qh->period = urb->interval; 817 } 818 } 819 820 /* support for tt scheduling, and access to toggles */ 821 qh->dev = urb->dev; 822 823 /* using TT? */ 824 switch (urb->dev->speed) { 825 case USB_SPEED_LOW: 826 info1 |= (1 << 12); /* EPS "low" */ 827 /* FALL THROUGH */ 828 829 case USB_SPEED_FULL: 830 /* EPS 0 means "full" */ 831 if (type != PIPE_INTERRUPT) 832 info1 |= (EHCI_TUNE_RL_TT << 28); 833 if (type == PIPE_CONTROL) { 834 info1 |= (1 << 27); /* for TT */ 835 info1 |= 1 << 14; /* toggle from qtd */ 836 } 837 info1 |= maxp << 16; 838 839 info2 |= (EHCI_TUNE_MULT_TT << 30); 840 841 /* Some Freescale processors have an erratum in which the 842 * port number in the queue head was 0..N-1 instead of 1..N. 843 */ 844 if (ehci_has_fsl_portno_bug(ehci)) 845 info2 |= (urb->dev->ttport-1) << 23; 846 else 847 info2 |= urb->dev->ttport << 23; 848 849 /* set the address of the TT; for TDI's integrated 850 * root hub tt, leave it zeroed. 851 */ 852 if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub) 853 info2 |= tt->hub->devnum << 16; 854 855 /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ 856 857 break; 858 859 case USB_SPEED_HIGH: /* no TT involved */ 860 info1 |= (2 << 12); /* EPS "high" */ 861 if (type == PIPE_CONTROL) { 862 info1 |= (EHCI_TUNE_RL_HS << 28); 863 info1 |= 64 << 16; /* usb2 fixed maxpacket */ 864 info1 |= 1 << 14; /* toggle from qtd */ 865 info2 |= (EHCI_TUNE_MULT_HS << 30); 866 } else if (type == PIPE_BULK) { 867 info1 |= (EHCI_TUNE_RL_HS << 28); 868 /* The USB spec says that high speed bulk endpoints 869 * always use 512 byte maxpacket. But some device 870 * vendors decided to ignore that, and MSFT is happy 871 * to help them do so. So now people expect to use 872 * such nonconformant devices with Linux too; sigh. 873 */ 874 info1 |= max_packet(maxp) << 16; 875 info2 |= (EHCI_TUNE_MULT_HS << 30); 876 } else { /* PIPE_INTERRUPT */ 877 info1 |= max_packet (maxp) << 16; 878 info2 |= hb_mult (maxp) << 30; 879 } 880 break; 881 default: 882 dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed); 883done: 884 qh_put (qh); 885 return NULL; 886 } 887 888 /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ 889 890 /* init as live, toggle clear, advance to dummy */ 891 qh->qh_state = QH_STATE_IDLE; 892 qh->hw_info1 = cpu_to_hc32(ehci, info1); 893 qh->hw_info2 = cpu_to_hc32(ehci, info2); 894 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); 895 qh_refresh (ehci, qh); 896 return qh; 897} 898 899/*-------------------------------------------------------------------------*/ 900 901/* move qh (and its qtds) onto async queue; maybe enable queue. */ 902 903static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) 904{ 905 __hc32 dma = QH_NEXT(ehci, qh->qh_dma); 906 struct ehci_qh *head; 907 908 /* Don't link a QH if there's a Clear-TT-Buffer pending */ 909 if (unlikely(qh->clearing_tt)) 910 return; 911 912 /* (re)start the async schedule? */ 913 head = ehci->async; 914 timer_action_done (ehci, TIMER_ASYNC_OFF); 915 if (!head->qh_next.qh) { 916 u32 cmd = ehci_readl(ehci, &ehci->regs->command); 917 918 if (!(cmd & CMD_ASE)) { 919 /* in case a clear of CMD_ASE didn't take yet */ 920 (void)handshake(ehci, &ehci->regs->status, 921 STS_ASS, 0, 150); 922 cmd |= CMD_ASE | CMD_RUN; 923 ehci_writel(ehci, cmd, &ehci->regs->command); 924 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; 925 /* posted write need not be known to HC yet ... */ 926 } 927 } 928 929 /* clear halt and/or toggle; and maybe recover from silicon quirk */ 930 if (qh->qh_state == QH_STATE_IDLE) 931 qh_refresh (ehci, qh); 932 933 /* splice right after start */ 934 qh->qh_next = head->qh_next; 935 qh->hw_next = head->hw_next; 936 wmb (); 937 938 head->qh_next.qh = qh; 939 head->hw_next = dma; 940 941 qh->xacterrs = QH_XACTERR_MAX; 942 qh->qh_state = QH_STATE_LINKED; 943 /* qtd completions reported later by interrupt */ 944} 945 946/*-------------------------------------------------------------------------*/ 947 948/* 949 * For control/bulk/interrupt, return QH with these TDs appended. 950 * Allocates and initializes the QH if necessary. 951 * Returns null if it can't allocate a QH it needs to. 952 * If the QH has TDs (urbs) already, that's great. 953 */ 954static struct ehci_qh *qh_append_tds ( 955 struct ehci_hcd *ehci, 956 struct urb *urb, 957 struct list_head *qtd_list, 958 int epnum, 959 void **ptr 960) 961{ 962 struct ehci_qh *qh = NULL; 963 __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f); 964 965 qh = (struct ehci_qh *) *ptr; 966 if (unlikely (qh == NULL)) { 967 /* can't sleep here, we have ehci->lock... */ 968 qh = qh_make (ehci, urb, GFP_ATOMIC); 969 *ptr = qh; 970 } 971 if (likely (qh != NULL)) { 972 struct ehci_qtd *qtd; 973 974 if (unlikely (list_empty (qtd_list))) 975 qtd = NULL; 976 else 977 qtd = list_entry (qtd_list->next, struct ehci_qtd, 978 qtd_list); 979 980 /* control qh may need patching ... */ 981 if (unlikely (epnum == 0)) { 982 983 /* usb_reset_device() briefly reverts to address 0 */ 984 if (usb_pipedevice (urb->pipe) == 0) 985 qh->hw_info1 &= ~qh_addr_mask; 986 } 987 988 /* just one way to queue requests: swap with the dummy qtd. 989 * only hc or qh_refresh() ever modify the overlay. 990 */ 991 if (likely (qtd != NULL)) { 992 struct ehci_qtd *dummy; 993 dma_addr_t dma; 994 __hc32 token; 995 996 /* to avoid racing the HC, use the dummy td instead of 997 * the first td of our list (becomes new dummy). both 998 * tds stay deactivated until we're done, when the 999 * HC is allowed to fetch the old dummy (4.10.2). 1000 */ 1001 token = qtd->hw_token; 1002 qtd->hw_token = HALT_BIT(ehci); 1003 wmb (); 1004 dummy = qh->dummy; 1005 1006 dma = dummy->qtd_dma; 1007 *dummy = *qtd; 1008 dummy->qtd_dma = dma; 1009 1010 list_del (&qtd->qtd_list); 1011 list_add (&dummy->qtd_list, qtd_list); 1012 list_splice_tail(qtd_list, &qh->qtd_list); 1013 1014 ehci_qtd_init(ehci, qtd, qtd->qtd_dma); 1015 qh->dummy = qtd; 1016 1017 /* hc must see the new dummy at list end */ 1018 dma = qtd->qtd_dma; 1019 qtd = list_entry (qh->qtd_list.prev, 1020 struct ehci_qtd, qtd_list); 1021 qtd->hw_next = QTD_NEXT(ehci, dma); 1022 1023 /* let the hc process these next qtds */ 1024 wmb (); 1025 dummy->hw_token = token; 1026 1027 urb->hcpriv = qh_get (qh); 1028 } 1029 } 1030 return qh; 1031} 1032 1033/*-------------------------------------------------------------------------*/ 1034 1035static int 1036submit_async ( 1037 struct ehci_hcd *ehci, 1038 struct urb *urb, 1039 struct list_head *qtd_list, 1040 gfp_t mem_flags 1041) { 1042 struct ehci_qtd *qtd; 1043 int epnum; 1044 unsigned long flags; 1045 struct ehci_qh *qh = NULL; 1046 int rc; 1047 1048 qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list); 1049 epnum = urb->ep->desc.bEndpointAddress; 1050 1051#ifdef EHCI_URB_TRACE 1052 ehci_dbg (ehci, 1053 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", 1054 __func__, urb->dev->devpath, urb, 1055 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", 1056 urb->transfer_buffer_length, 1057 qtd, urb->ep->hcpriv); 1058#endif 1059 1060 spin_lock_irqsave (&ehci->lock, flags); 1061 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, 1062 &ehci_to_hcd(ehci)->flags))) { 1063 rc = -ESHUTDOWN; 1064 goto done; 1065 } 1066 rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); 1067 if (unlikely(rc)) 1068 goto done; 1069 1070 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); 1071 if (unlikely(qh == NULL)) { 1072 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 1073 rc = -ENOMEM; 1074 goto done; 1075 } 1076 1077 /* Control/bulk operations through TTs don't need scheduling, 1078 * the HC and TT handle it when the TT has a buffer ready. 1079 */ 1080 if (likely (qh->qh_state == QH_STATE_IDLE)) 1081 qh_link_async (ehci, qh_get (qh)); 1082 done: 1083 spin_unlock_irqrestore (&ehci->lock, flags); 1084 if (unlikely (qh == NULL)) 1085 qtd_list_free (ehci, urb, qtd_list); 1086 return rc; 1087} 1088 1089/*-------------------------------------------------------------------------*/ 1090 1091/* the async qh for the qtds being reclaimed are now unlinked from the HC */ 1092 1093static void end_unlink_async (struct ehci_hcd *ehci) 1094{ 1095 struct ehci_qh *qh = ehci->reclaim; 1096 struct ehci_qh *next; 1097 1098 iaa_watchdog_done(ehci); 1099 1100 // qh->hw_next = cpu_to_hc32(qh->qh_dma); 1101 qh->qh_state = QH_STATE_IDLE; 1102 qh->qh_next.qh = NULL; 1103 qh_put (qh); // refcount from reclaim 1104 1105 /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */ 1106 next = qh->reclaim; 1107 ehci->reclaim = next; 1108 qh->reclaim = NULL; 1109 1110 qh_completions (ehci, qh); 1111 1112 if (!list_empty (&qh->qtd_list) 1113 && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) 1114 qh_link_async (ehci, qh); 1115 else { 1116 qh_put (qh); // refcount from async list 1117 1118 /* it's not free to turn the async schedule on/off; leave it 1119 * active but idle for a while once it empties. 1120 */ 1121 if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) 1122 && ehci->async->qh_next.qh == NULL) 1123 timer_action (ehci, TIMER_ASYNC_OFF); 1124 } 1125 1126 if (next) { 1127 ehci->reclaim = NULL; 1128 start_unlink_async (ehci, next); 1129 } 1130} 1131 1132/* makes sure the async qh will become idle */ 1133/* caller must own ehci->lock */ 1134 1135static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) 1136{ 1137 int cmd = ehci_readl(ehci, &ehci->regs->command); 1138 struct ehci_qh *prev; 1139 1140#ifdef DEBUG 1141 assert_spin_locked(&ehci->lock); 1142 if (ehci->reclaim 1143 || (qh->qh_state != QH_STATE_LINKED 1144 && qh->qh_state != QH_STATE_UNLINK_WAIT) 1145 ) 1146 BUG (); 1147#endif 1148 1149 /* stop async schedule right now? */ 1150 if (unlikely (qh == ehci->async)) { 1151 /* can't get here without STS_ASS set */ 1152 if (ehci_to_hcd(ehci)->state != HC_STATE_HALT 1153 && !ehci->reclaim) { 1154 /* ... and CMD_IAAD clear */ 1155 ehci_writel(ehci, cmd & ~CMD_ASE, 1156 &ehci->regs->command); 1157 wmb (); 1158 // handshake later, if we need to 1159 timer_action_done (ehci, TIMER_ASYNC_OFF); 1160 } 1161 return; 1162 } 1163 1164 qh->qh_state = QH_STATE_UNLINK; 1165 ehci->reclaim = qh = qh_get (qh); 1166 1167 prev = ehci->async; 1168 while (prev->qh_next.qh != qh) 1169 prev = prev->qh_next.qh; 1170 1171 prev->hw_next = qh->hw_next; 1172 prev->qh_next = qh->qh_next; 1173 wmb (); 1174 1175 /* If the controller isn't running, we don't have to wait for it */ 1176 if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) { 1177 /* if (unlikely (qh->reclaim != 0)) 1178 * this will recurse, probably not much 1179 */ 1180 end_unlink_async (ehci); 1181 return; 1182 } 1183 1184 cmd |= CMD_IAAD; 1185 ehci_writel(ehci, cmd, &ehci->regs->command); 1186 (void)ehci_readl(ehci, &ehci->regs->command); 1187 iaa_watchdog_start(ehci); 1188} 1189 1190/*-------------------------------------------------------------------------*/ 1191 1192static void scan_async (struct ehci_hcd *ehci) 1193{ 1194 struct ehci_qh *qh; 1195 enum ehci_timer_action action = TIMER_IO_WATCHDOG; 1196 1197 ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); 1198 timer_action_done (ehci, TIMER_ASYNC_SHRINK); 1199rescan: 1200 qh = ehci->async->qh_next.qh; 1201 if (likely (qh != NULL)) { 1202 do { 1203 /* clean any finished work for this qh */ 1204 if (!list_empty (&qh->qtd_list) 1205 && qh->stamp != ehci->stamp) { 1206 int temp; 1207 1208 /* unlinks could happen here; completion 1209 * reporting drops the lock. rescan using 1210 * the latest schedule, but don't rescan 1211 * qhs we already finished (no looping). 1212 */ 1213 qh = qh_get (qh); 1214 qh->stamp = ehci->stamp; 1215 temp = qh_completions (ehci, qh); 1216 qh_put (qh); 1217 if (temp != 0) { 1218 goto rescan; 1219 } 1220 } 1221 1222 /* unlink idle entries, reducing DMA usage as well 1223 * as HCD schedule-scanning costs. delay for any qh 1224 * we just scanned, there's a not-unusual case that it 1225 * doesn't stay idle for long. 1226 * (plus, avoids some kind of re-activation race.) 1227 */ 1228 if (list_empty(&qh->qtd_list) 1229 && qh->qh_state == QH_STATE_LINKED) { 1230 if (!ehci->reclaim 1231 && ((ehci->stamp - qh->stamp) & 0x1fff) 1232 >= (EHCI_SHRINK_FRAMES * 8)) 1233 start_unlink_async(ehci, qh); 1234 else 1235 action = TIMER_ASYNC_SHRINK; 1236 } 1237 1238 qh = qh->qh_next.qh; 1239 } while (qh); 1240 } 1241 if (action == TIMER_ASYNC_SHRINK) 1242 timer_action (ehci, TIMER_ASYNC_SHRINK); 1243} 1244