ehci-q.c revision 004c19682884d4f40000ce1ded53f4a1d0b18206
/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/

/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf += 0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}
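
/* Worked example (added commentary, not from the original source): with
 * buf == 0x12345800 and len == 0x4800, hw_buf[0] covers the 0x800 bytes
 * up to the next page boundary and hw_buf[1..4] map the following four
 * 4K pages, so count reaches 0x800 + 4 * 0x1000 == len exactly.  Had len
 * been larger, count would stop there and be rounded down to a maxpacket
 * multiple, since only the last packet of a transfer may be short.
 */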

/*-------------------------------------------------------------------------*/

static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
		unsigned	is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& HC_IS_RUNNING(hcd->state))
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}
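
/* Note (added commentary): clearing a TT buffer is a two-step handshake.
 * ehci_clear_tt_buffer() below submits the Clear-TT-Buffer request via
 * usb_hub_clear_tt_buffer() and sets qh->clearing_tt; the completion
 * callback above clears that flag and, if qtds queued up meanwhile,
 * relinks the qh into the async schedule.
 */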

static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{

	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef DEBUG
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* DEBUG */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}

static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);
	}

	return status;
}
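
/* Summary of the fault decoding above (added as a reading aid):
 *	QTD_STS_BABBLE			--> -EOVERFLOW
 *	halt with CERR nonzero (stall)	--> -EPIPE
 *	QTD_STS_MMF (missed uframe)	--> -EPROTO
 *	QTD_STS_DBE			--> -ENOSR (IN) or -ECOMM (OUT)
 *	QTD_STS_XACT or unknown halt	--> -EPROTO
 * Short reads report -EREMOTEIO here; callers later treat that as success
 * unless URB_SHORT_NOT_OK semantics apply.
 */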
"in" : "out", 288 status, 289 urb->actual_length, urb->transfer_buffer_length); 290#endif 291 292 /* complete() can reenter this HCD */ 293 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 294 spin_unlock (&ehci->lock); 295 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); 296 spin_lock (&ehci->lock); 297} 298 299static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); 300static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); 301 302static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); 303 304/* 305 * Process and free completed qtds for a qh, returning URBs to drivers. 306 * Chases up to qh->hw_current. Returns number of completions called, 307 * indicating how much "real" work we did. 308 */ 309static unsigned 310qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) 311{ 312 struct ehci_qtd *last, *end = qh->dummy; 313 struct list_head *entry, *tmp; 314 int last_status; 315 int stopped; 316 unsigned count = 0; 317 u8 state; 318 struct ehci_qh_hw *hw = qh->hw; 319 320 if (unlikely (list_empty (&qh->qtd_list))) 321 return count; 322 323 /* completions (or tasks on other cpus) must never clobber HALT 324 * till we've gone through and cleaned everything up, even when 325 * they add urbs to this qh's queue or mark them for unlinking. 326 * 327 * NOTE: unlinking expects to be done in queue order. 328 * 329 * It's a bug for qh->qh_state to be anything other than 330 * QH_STATE_IDLE, unless our caller is scan_async() or 331 * scan_periodic(). 332 */ 333 state = qh->qh_state; 334 qh->qh_state = QH_STATE_COMPLETING; 335 stopped = (state == QH_STATE_IDLE); 336 337 rescan: 338 last = NULL; 339 last_status = -EINPROGRESS; 340 qh->needs_rescan = 0; 341 342 /* remove de-activated QTDs from front of queue. 343 * after faults (including short reads), cleanup this urb 344 * then let the queue advance. 345 * if queue is stopped, handles unlinks. 346 */ 347 list_for_each_safe (entry, tmp, &qh->qtd_list) { 348 struct ehci_qtd *qtd; 349 struct urb *urb; 350 u32 token = 0; 351 352 qtd = list_entry (entry, struct ehci_qtd, qtd_list); 353 urb = qtd->urb; 354 355 /* clean up any state from previous QTD ...*/ 356 if (last) { 357 if (likely (last->urb != urb)) { 358 ehci_urb_done(ehci, last->urb, last_status); 359 count++; 360 last_status = -EINPROGRESS; 361 } 362 ehci_qtd_free (ehci, last); 363 last = NULL; 364 } 365 366 /* ignore urbs submitted during completions we reported */ 367 if (qtd == end) 368 break; 369 370 /* hardware copies qtd out of qh overlay */ 371 rmb (); 372 token = hc32_to_cpu(ehci, qtd->hw_token); 373 374 /* always clean up qtds the hc de-activated */ 375 retry_xacterr: 376 if ((token & QTD_STS_ACTIVE) == 0) { 377 378 /* on STALL, error, and short reads this urb must 379 * complete and all its qtds must be recycled. 

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	unsigned		count = 0;
	u8			state;
	struct ehci_qh_hw	*hw = qh->hw;

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_periodic().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->needs_rescan = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
	"detected XactErr len %zu/%zu retry %d\n",
	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_hc32(ehci, qtd->qtd_dma)
						== hw->hw_current) {
				token = hc32_to_cpu(ehci, hw->hw_token);

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ehci_clear_tt_buffer(ehci, qh, urb, token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TT's in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					ehci_clear_tt_buffer(ehci, qh, urb,
							token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->needs_rescan)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise we have to wait until the QH is fully unlinked.
		 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result:  HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
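
// Example (added commentary): a high-bandwidth interrupt endpoint with
// wMaxPacketSize == 0x1400 decodes as hb_mult() == 3 transactions per
// microframe of max_packet() == 0x400 (1024) bytes each.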

/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}
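
/* Illustrative qtd chain (added commentary): a control-IN request with an
 * 18-byte data stage is built by qh_urb_transaction() below as three qtds:
 * SETUP (8 bytes, DATA0), IN data (18 bytes, DATA1), and a zero-length OUT
 * status ack (forced DATA1).  Bulk and interrupt urbs skip the setup and
 * status qtds and just wrap the data buffer.
 */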

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
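
/* Usage sketch (added commentary): for a 32 KB bulk urb, qh_urb_transaction()
 * above emits two qtds (each holding 16-20 KB depending on page alignment);
 * if URB_ZERO_PACKET was set and 32 KB is a maxpacket multiple, a third
 * zero-length qtd terminates the transfer.
 */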

/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;
		qh->stamp = ehci->periodic_stamp;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period << 3;
			}
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
					usb_calc_bus_time (urb->dev->speed,
						is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
			if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period;
			}
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}
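
/* QH overlay dword layout used above (per the EHCI 1.0 spec, section 3.6;
 * added as a reading aid):
 *	hw_info1: device address [6:0], endpoint [11:8], EPS [13:12],
 *		  DTC (toggle from qtd) bit 14, maxpacket [26:16],
 *		  control-endpoint flag bit 27, NAK reload [31:28]
 *	hw_info2: s-mask [7:0], c-mask [15:8], hub address [22:16],
 *		  port number [29:23], high-bandwidth mult [31:30]
 */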

/*-------------------------------------------------------------------------*/

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = ehci_readl(ehci, &ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(ehci, &ehci->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			ehci_writel(ehci, cmd, &ehci->regs->command);
			ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh_get(qh);
	qh->xacterrs = 0;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}
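
/* Note on qh_link_async() above (added commentary): the async schedule is
 * a circular list of QHs headed by ehci->async, whose hw_info1 H-bit marks
 * it as the list head.  New QHs are spliced in right after that head, so
 * the HC picks them up on its next pass around the ring.
 */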

/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}
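
/* The dummy-qtd swap in qh_append_tds() above, pictured (added commentary):
 *
 *	before:  qh ... last qtd -> dummy (inactive)
 *	after:   qh ... last qtd -> old dummy (now holding the first new
 *		 qtd's contents) -> ... -> first new qtd (reinitialized as
 *		 the new dummy)
 *
 * Only the final write of dummy->hw_token makes the new work visible to
 * the HC, so the queue is never seen half-built.
 */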

/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	{
		struct ehci_qtd *qtd;
		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
		ehci_dbg(ehci,
			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
			 __func__, urb->dev->devpath, urb,
			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
			 urb->transfer_buffer_length,
			 qtd, urb->ep->hcpriv);
	}
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ehci, qh);
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}
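
/* Note (added commentary): async unlinks use the IAA (Interrupt on Async
 * Advance) handshake.  start_unlink_async() below edits the schedule and
 * sets CMD_IAAD; once the controller acknowledges with an IAA interrupt
 * (or the watchdog fires), end_unlink_async() finishes the unlink and
 * recycles or relinks the QH.
 */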

/*-------------------------------------------------------------------------*/

/* the async qh for the qtds being reclaimed are now unlinked from the HC */

static void end_unlink_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	iaa_watchdog_done(ehci);

	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	qh->reclaim = NULL;

	qh_completions (ehci, qh);

	if (!list_empty (&qh->qtd_list)
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		qh_link_async (ehci, qh);
	else {
		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}
	qh_put(qh);			/* refcount from async list */

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}

	if (ehci->has_synopsys_hc_bug)
		ehci_writel(ehci, (u32) ehci->async->qh_dma,
			    &ehci->regs->async_next);
}

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = ehci_readl(ehci, &ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	assert_spin_locked(&ehci->lock);
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
				&& !ehci->reclaim) {
			/* ... and CMD_IAAD clear */
			ehci_writel(ehci, cmd & ~CMD_ASE,
				    &ehci->regs->command);
			wmb ();
			// handshake later, if we need to
			timer_action_done (ehci, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = qh->qh_next.qh;
	wmb ();

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}

	cmd |= CMD_IAAD;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	(void)ehci_readl(ehci, &ehci->regs->command);
	iaa_watchdog_start(ehci);
}

/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	bool			stopped;
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
	stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);

	ehci->qh_scan_next = ehci->async->qh_next.qh;
	while (ehci->qh_scan_next) {
		qh = ehci->qh_scan_next;
		ehci->qh_scan_next = qh->qh_next.qh;
 rescan:
		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in start_unlink_async().
			 */
			qh = qh_get(qh);
			temp = qh_completions(ehci, qh);
			if (qh->needs_rescan)
				unlink_async(ehci, qh);
			qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
			qh_put(qh);
			if (temp != 0)
				goto rescan;
		}

		/* unlink idle entries, reducing DMA usage as well
		 * as HCD schedule-scanning costs.  delay for any qh
		 * we just scanned, there's a not-unusual case that it
		 * doesn't stay idle for long.
		 * (plus, avoids some kind of re-activation race.)
		 */
		if (list_empty(&qh->qtd_list)
				&& qh->qh_state == QH_STATE_LINKED) {
			if (!ehci->reclaim && (stopped ||
					time_after_eq(jiffies, qh->unlink_time)))
				start_unlink_async(ehci, qh);
			else
				action = TIMER_ASYNC_SHRINK;
		}
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}