/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}
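/*
 * xhci_halt() above and xhci_start() below are mirror images of the same
 * handshake() pattern: clear or set CMD_RUN in the command register, then
 * poll the status register until STS_HALT reaches the expected value or the
 * timeout expires.
 */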
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
			0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * free all IRQs that have been requested
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}
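/*
 * Note on the hcd->irq convention used above: xhci_try_enable_msi() below
 * sets hcd->irq to -1 once MSI or MSI-X interrupts are in use, so a
 * non-negative hcd->irq in xhci_free_irq() means the legacy PCI interrupt
 * is still registered and must not be freed here.
 */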
/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
	 *   handle, based on the number of interrupters in HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
	 *   additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		return 0;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is -1, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif
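/*
 * Taken together, the PCI-only helpers above implement a simple fallback
 * chain at startup: try MSI-X first, fall back to plain MSI, and finally
 * fall back to the legacy (shared) PCI interrupt if neither can be set up.
 * On non-PCI builds the stubs make interrupt setup a no-op.
 */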
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}
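/*
 * The command ring pointer is not among the registers saved above; instead
 * the helper below rebuilds the CRCR value from software state: the
 * reserved/status bits currently in the register are preserved, the DMA
 * address of the software dequeue pointer (which must be 64-byte aligned,
 * see the comment before xhci_clear_command_ring()) is filled in, and the
 * ring's cycle state is OR'd in.
 */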
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));
		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

done:
	if (retval == 0) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);
	}
	return retval;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
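/*
 * A quick worked example of the index/flag helpers above: for endpoint
 * 0x81 (ep 1 IN), epnum = 1 and direction = 1, so
 * xhci_get_endpoint_index() returns (1 * 2) + 1 - 1 = 2, and
 * xhci_get_endpoint_flag() returns 1 << (2 + 1) = 0x8.  Feeding that
 * added-contexts mask to xhci_last_valid_endpoint() gives fls(0x8) - 1 = 3,
 * the last valid endpoint context index for the slot.
 */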
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);
		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
			size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}
		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx\n",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
				"configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
				"context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
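/*
 * Example of the counting above: if a configure endpoint command adds the
 * endpoints at endpoint indices 2 and 4 while dropping the one at index 2,
 * then add_flags has bits 3 and 5 set and drop_flags has bit 3 set.  After
 * shifting out the slot and ep0 bits, hweight32(valid_add_flags) = 2, and
 * one of those endpoints is also being dropped (a "changed" endpoint), so
 * only 2 - 1 = 1 new endpoint is counted against the host's resources, and
 * 1 - 1 = 0 endpoints are counted as dropped.
 */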
We can't subtract the dropped endpoints from the number 1747 * of active endpoints until the command completes because we can oversubscribe 1748 * the host in this case: 1749 * 1750 * - the first configure endpoint command drops more endpoints than it adds 1751 * - a second configure endpoint command that adds more endpoints is queued 1752 * - the first configure endpoint command fails, so the config is unchanged 1753 * - the second command may succeed, even though there isn't enough resources 1754 * 1755 * Must be called with xhci->lock held. 1756 */ 1757static int xhci_reserve_host_resources(struct xhci_hcd *xhci, 1758 struct xhci_container_ctx *in_ctx) 1759{ 1760 u32 added_eps; 1761 1762 added_eps = xhci_count_num_new_endpoints(xhci, in_ctx); 1763 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { 1764 xhci_dbg(xhci, "Not enough ep ctxs: " 1765 "%u active, need to add %u, limit is %u.\n", 1766 xhci->num_active_eps, added_eps, 1767 xhci->limit_active_eps); 1768 return -ENOMEM; 1769 } 1770 xhci->num_active_eps += added_eps; 1771 xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, 1772 xhci->num_active_eps); 1773 return 0; 1774} 1775 1776/* 1777 * The configure endpoint was failed by the xHC for some other reason, so we 1778 * need to revert the resources that failed configuration would have used. 1779 * 1780 * Must be called with xhci->lock held. 1781 */ 1782static void xhci_free_host_resources(struct xhci_hcd *xhci, 1783 struct xhci_container_ctx *in_ctx) 1784{ 1785 u32 num_failed_eps; 1786 1787 num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx); 1788 xhci->num_active_eps -= num_failed_eps; 1789 xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", 1790 num_failed_eps, 1791 xhci->num_active_eps); 1792} 1793 1794/* 1795 * Now that the command has completed, clean up the active endpoint count by 1796 * subtracting out the endpoints that were dropped (but not changed). 1797 * 1798 * Must be called with xhci->lock held. 1799 */ 1800static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, 1801 struct xhci_container_ctx *in_ctx) 1802{ 1803 u32 num_dropped_eps; 1804 1805 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx); 1806 xhci->num_active_eps -= num_dropped_eps; 1807 if (num_dropped_eps) 1808 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", 1809 num_dropped_eps, 1810 xhci->num_active_eps); 1811} 1812 1813unsigned int xhci_get_block_size(struct usb_device *udev) 1814{ 1815 switch (udev->speed) { 1816 case USB_SPEED_LOW: 1817 case USB_SPEED_FULL: 1818 return FS_BLOCK; 1819 case USB_SPEED_HIGH: 1820 return HS_BLOCK; 1821 case USB_SPEED_SUPER: 1822 return SS_BLOCK; 1823 case USB_SPEED_UNKNOWN: 1824 case USB_SPEED_WIRELESS: 1825 default: 1826 /* Should never happen */ 1827 return 1; 1828 } 1829} 1830 1831unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) 1832{ 1833 if (interval_bw->overhead[LS_OVERHEAD_TYPE]) 1834 return LS_OVERHEAD; 1835 if (interval_bw->overhead[FS_OVERHEAD_TYPE]) 1836 return FS_OVERHEAD; 1837 return HS_OVERHEAD; 1838} 1839 1840/* If we are changing a LS/FS device under a HS hub, 1841 * make sure (if we are activating a new TT) that the HS bus has enough 1842 * bandwidth for this new TT. 
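 * Each TT that goes from zero active periodic endpoints to a non-zero count is
 * charged a fixed TT_HS_OVERHEAD against the root port's high-speed budget;
 * the check below is just bw_used + TT_HS_OVERHEAD <= HS_BW_LIMIT (a
 * restatement of the code that follows, for readability).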
1843 */ 1844static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, 1845 struct xhci_virt_device *virt_dev, 1846 int old_active_eps) 1847{ 1848 struct xhci_interval_bw_table *bw_table; 1849 struct xhci_tt_bw_info *tt_info; 1850 1851 /* Find the bandwidth table for the root port this TT is attached to. */ 1852 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; 1853 tt_info = virt_dev->tt_info; 1854 /* If this TT already had active endpoints, the bandwidth for this TT 1855 * has already been added. Removing all periodic endpoints (and thus 1856 * making the TT enactive) will only decrease the bandwidth used. 1857 */ 1858 if (old_active_eps) 1859 return 0; 1860 if (old_active_eps == 0 && tt_info->active_eps != 0) { 1861 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) 1862 return -ENOMEM; 1863 return 0; 1864 } 1865 /* Not sure why we would have no new active endpoints... 1866 * 1867 * Maybe because of an Evaluate Context change for a hub update or a 1868 * control endpoint 0 max packet size change? 1869 * FIXME: skip the bandwidth calculation in that case. 1870 */ 1871 return 0; 1872} 1873 1874static int xhci_check_ss_bw(struct xhci_hcd *xhci, 1875 struct xhci_virt_device *virt_dev) 1876{ 1877 unsigned int bw_reserved; 1878 1879 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); 1880 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) 1881 return -ENOMEM; 1882 1883 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); 1884 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) 1885 return -ENOMEM; 1886 1887 return 0; 1888} 1889 1890/* 1891 * This algorithm is a very conservative estimate of the worst-case scheduling 1892 * scenario for any one interval. The hardware dynamically schedules the 1893 * packets, so we can't tell which microframe could be the limiting factor in 1894 * the bandwidth scheduling. This only takes into account periodic endpoints. 1895 * 1896 * Obviously, we can't solve an NP complete problem to find the minimum worst 1897 * case scenario. Instead, we come up with an estimate that is no less than 1898 * the worst case bandwidth used for any one microframe, but may be an 1899 * over-estimate. 1900 * 1901 * We walk the requirements for each endpoint by interval, starting with the 1902 * smallest interval, and place packets in the schedule where there is only one 1903 * possible way to schedule packets for that interval. In order to simplify 1904 * this algorithm, we record the largest max packet size for each interval, and 1905 * assume all packets will be that size. 1906 * 1907 * For interval 0, we obviously must schedule all packets for each interval. 1908 * The bandwidth for interval 0 is just the amount of data to be transmitted 1909 * (the sum of all max ESIT payload sizes, plus any overhead per packet times 1910 * the number of packets). 1911 * 1912 * For interval 1, we have two possible microframes to schedule those packets 1913 * in. For this algorithm, if we can schedule the same number of packets for 1914 * each possible scheduling opportunity (each microframe), we will do so. The 1915 * remaining number of packets will be saved to be transmitted in the gaps in 1916 * the next interval's scheduling sequence. 1917 * 1918 * As we move those remaining packets to be scheduled with interval 2 packets, 1919 * we have to double the number of remaining packets to transmit. 
This is 1920 * because the intervals are actually powers of 2, and we would be transmitting 1921 * the previous interval's packets twice in this interval. We also have to be 1922 * sure that when we look at the largest max packet size for this interval, we 1923 * also look at the largest max packet size for the remaining packets and take 1924 * the greater of the two. 1925 * 1926 * The algorithm continues to evenly distribute packets in each scheduling 1927 * opportunity, and push the remaining packets out, until we get to the last 1928 * interval. Then those packets and their associated overhead are just added 1929 * to the bandwidth used. 1930 */ 1931static int xhci_check_bw_table(struct xhci_hcd *xhci, 1932 struct xhci_virt_device *virt_dev, 1933 int old_active_eps) 1934{ 1935 unsigned int bw_reserved; 1936 unsigned int max_bandwidth; 1937 unsigned int bw_used; 1938 unsigned int block_size; 1939 struct xhci_interval_bw_table *bw_table; 1940 unsigned int packet_size = 0; 1941 unsigned int overhead = 0; 1942 unsigned int packets_transmitted = 0; 1943 unsigned int packets_remaining = 0; 1944 unsigned int i; 1945 1946 if (virt_dev->udev->speed == USB_SPEED_SUPER) 1947 return xhci_check_ss_bw(xhci, virt_dev); 1948 1949 if (virt_dev->udev->speed == USB_SPEED_HIGH) { 1950 max_bandwidth = HS_BW_LIMIT; 1951 /* Convert percent of bus BW reserved to blocks reserved */ 1952 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); 1953 } else { 1954 max_bandwidth = FS_BW_LIMIT; 1955 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); 1956 } 1957 1958 bw_table = virt_dev->bw_table; 1959 /* We need to translate the max packet size and max ESIT payloads into 1960 * the units the hardware uses. 1961 */ 1962 block_size = xhci_get_block_size(virt_dev->udev); 1963 1964 /* If we are manipulating a LS/FS device under a HS hub, double check 1965 * that the HS bus has enough bandwidth if we are activing a new TT. 1966 */ 1967 if (virt_dev->tt_info) { 1968 xhci_dbg(xhci, "Recalculating BW for rootport %u\n", 1969 virt_dev->real_port); 1970 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { 1971 xhci_warn(xhci, "Not enough bandwidth on HS bus for " 1972 "newly activated TT.\n"); 1973 return -ENOMEM; 1974 } 1975 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n", 1976 virt_dev->tt_info->slot_id, 1977 virt_dev->tt_info->ttport); 1978 } else { 1979 xhci_dbg(xhci, "Recalculating BW for rootport %u\n", 1980 virt_dev->real_port); 1981 } 1982 1983 /* Add in how much bandwidth will be used for interval zero, or the 1984 * rounded max ESIT payload + number of packets * largest overhead. 1985 */ 1986 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + 1987 bw_table->interval_bw[0].num_packets * 1988 xhci_get_largest_overhead(&bw_table->interval_bw[0]); 1989 1990 for (i = 1; i < XHCI_MAX_INTERVAL; i++) { 1991 unsigned int bw_added; 1992 unsigned int largest_mps; 1993 unsigned int interval_overhead; 1994 1995 /* 1996 * How many packets could we transmit in this interval? 1997 * If packets didn't fit in the previous interval, we will need 1998 * to transmit that many packets twice within this interval. 1999 */ 2000 packets_remaining = 2 * packets_remaining + 2001 bw_table->interval_bw[i].num_packets; 2002 2003 /* Find the largest max packet size of this or the previous 2004 * interval. 
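 * (xhci_add_ep_to_interval_table() keeps each interval's endpoint list sorted
 * with the largest max packet size first, so only the head of the list needs
 * to be examined here.)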
2005 */ 2006 if (list_empty(&bw_table->interval_bw[i].endpoints)) 2007 largest_mps = 0; 2008 else { 2009 struct xhci_virt_ep *virt_ep; 2010 struct list_head *ep_entry; 2011 2012 ep_entry = bw_table->interval_bw[i].endpoints.next; 2013 virt_ep = list_entry(ep_entry, 2014 struct xhci_virt_ep, bw_endpoint_list); 2015 /* Convert to blocks, rounding up */ 2016 largest_mps = DIV_ROUND_UP( 2017 virt_ep->bw_info.max_packet_size, 2018 block_size); 2019 } 2020 if (largest_mps > packet_size) 2021 packet_size = largest_mps; 2022 2023 /* Use the larger overhead of this or the previous interval. */ 2024 interval_overhead = xhci_get_largest_overhead( 2025 &bw_table->interval_bw[i]); 2026 if (interval_overhead > overhead) 2027 overhead = interval_overhead; 2028 2029 /* How many packets can we evenly distribute across 2030 * (1 << (i + 1)) possible scheduling opportunities? 2031 */ 2032 packets_transmitted = packets_remaining >> (i + 1); 2033 2034 /* Add in the bandwidth used for those scheduled packets */ 2035 bw_added = packets_transmitted * (overhead + packet_size); 2036 2037 /* How many packets do we have remaining to transmit? */ 2038 packets_remaining = packets_remaining % (1 << (i + 1)); 2039 2040 /* What largest max packet size should those packets have? */ 2041 /* If we've transmitted all packets, don't carry over the 2042 * largest packet size. 2043 */ 2044 if (packets_remaining == 0) { 2045 packet_size = 0; 2046 overhead = 0; 2047 } else if (packets_transmitted > 0) { 2048 /* Otherwise if we do have remaining packets, and we've 2049 * scheduled some packets in this interval, take the 2050 * largest max packet size from endpoints with this 2051 * interval. 2052 */ 2053 packet_size = largest_mps; 2054 overhead = interval_overhead; 2055 } 2056 /* Otherwise carry over packet_size and overhead from the last 2057 * time we had a remainder. 2058 */ 2059 bw_used += bw_added; 2060 if (bw_used > max_bandwidth) { 2061 xhci_warn(xhci, "Not enough bandwidth. " 2062 "Proposed: %u, Max: %u\n", 2063 bw_used, max_bandwidth); 2064 return -ENOMEM; 2065 } 2066 } 2067 /* 2068 * Ok, we know we have some packets left over after even-handedly 2069 * scheduling interval 15. We don't know which microframes they will 2070 * fit into, so we over-schedule and say they will be scheduled every 2071 * microframe. 2072 */ 2073 if (packets_remaining > 0) 2074 bw_used += overhead + packet_size; 2075 2076 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2077 unsigned int port_index = virt_dev->real_port - 1; 2078 2079 /* OK, we're manipulating a HS device attached to a 2080 * root port bandwidth domain. Include the number of active TTs 2081 * in the bandwidth used. 2082 */ 2083 bw_used += TT_HS_OVERHEAD * 2084 xhci->rh_bw[port_index].num_active_tts; 2085 } 2086 2087 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2088 "Available: %u " "percent\n", 2089 bw_used, max_bandwidth, bw_reserved, 2090 (max_bandwidth - bw_used - bw_reserved) * 100 / 2091 max_bandwidth); 2092 2093 bw_used += bw_reserved; 2094 if (bw_used > max_bandwidth) { 2095 xhci_warn(xhci, "Not enough bandwidth. 
Proposed: %u, Max: %u\n", 2096 bw_used, max_bandwidth); 2097 return -ENOMEM; 2098 } 2099 2100 bw_table->bw_used = bw_used; 2101 return 0; 2102} 2103 2104static bool xhci_is_async_ep(unsigned int ep_type) 2105{ 2106 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 2107 ep_type != ISOC_IN_EP && 2108 ep_type != INT_IN_EP); 2109} 2110 2111static bool xhci_is_sync_in_ep(unsigned int ep_type) 2112{ 2113 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); 2114} 2115 2116static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) 2117{ 2118 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); 2119 2120 if (ep_bw->ep_interval == 0) 2121 return SS_OVERHEAD_BURST + 2122 (ep_bw->mult * ep_bw->num_packets * 2123 (SS_OVERHEAD + mps)); 2124 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * 2125 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2126 1 << ep_bw->ep_interval); 2127 2128} 2129 2130void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, 2131 struct xhci_bw_info *ep_bw, 2132 struct xhci_interval_bw_table *bw_table, 2133 struct usb_device *udev, 2134 struct xhci_virt_ep *virt_ep, 2135 struct xhci_tt_bw_info *tt_info) 2136{ 2137 struct xhci_interval_bw *interval_bw; 2138 int normalized_interval; 2139 2140 if (xhci_is_async_ep(ep_bw->type)) 2141 return; 2142 2143 if (udev->speed == USB_SPEED_SUPER) { 2144 if (xhci_is_sync_in_ep(ep_bw->type)) 2145 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= 2146 xhci_get_ss_bw_consumed(ep_bw); 2147 else 2148 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= 2149 xhci_get_ss_bw_consumed(ep_bw); 2150 return; 2151 } 2152 2153 /* SuperSpeed endpoints never get added to intervals in the table, so 2154 * this check is only valid for HS/FS/LS devices. 2155 */ 2156 if (list_empty(&virt_ep->bw_endpoint_list)) 2157 return; 2158 /* For LS/FS devices, we need to translate the interval expressed in 2159 * microframes to frames. 2160 */ 2161 if (udev->speed == USB_SPEED_HIGH) 2162 normalized_interval = ep_bw->ep_interval; 2163 else 2164 normalized_interval = ep_bw->ep_interval - 3; 2165 2166 if (normalized_interval == 0) 2167 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; 2168 interval_bw = &bw_table->interval_bw[normalized_interval]; 2169 interval_bw->num_packets -= ep_bw->num_packets; 2170 switch (udev->speed) { 2171 case USB_SPEED_LOW: 2172 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; 2173 break; 2174 case USB_SPEED_FULL: 2175 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; 2176 break; 2177 case USB_SPEED_HIGH: 2178 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; 2179 break; 2180 case USB_SPEED_SUPER: 2181 case USB_SPEED_UNKNOWN: 2182 case USB_SPEED_WIRELESS: 2183 /* Should never happen because only LS/FS/HS endpoints will get 2184 * added to the endpoint list.
2185 */ 2186 return; 2187 } 2188 if (tt_info) 2189 tt_info->active_eps -= 1; 2190 list_del_init(&virt_ep->bw_endpoint_list); 2191} 2192 2193static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, 2194 struct xhci_bw_info *ep_bw, 2195 struct xhci_interval_bw_table *bw_table, 2196 struct usb_device *udev, 2197 struct xhci_virt_ep *virt_ep, 2198 struct xhci_tt_bw_info *tt_info) 2199{ 2200 struct xhci_interval_bw *interval_bw; 2201 struct xhci_virt_ep *smaller_ep; 2202 int normalized_interval; 2203 2204 if (xhci_is_async_ep(ep_bw->type)) 2205 return; 2206 2207 if (udev->speed == USB_SPEED_SUPER) { 2208 if (xhci_is_sync_in_ep(ep_bw->type)) 2209 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += 2210 xhci_get_ss_bw_consumed(ep_bw); 2211 else 2212 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += 2213 xhci_get_ss_bw_consumed(ep_bw); 2214 return; 2215 } 2216 2217 /* For LS/FS devices, we need to translate the interval expressed in 2218 * microframes to frames. 2219 */ 2220 if (udev->speed == USB_SPEED_HIGH) 2221 normalized_interval = ep_bw->ep_interval; 2222 else 2223 normalized_interval = ep_bw->ep_interval - 3; 2224 2225 if (normalized_interval == 0) 2226 bw_table->interval0_esit_payload += ep_bw->max_esit_payload; 2227 interval_bw = &bw_table->interval_bw[normalized_interval]; 2228 interval_bw->num_packets += ep_bw->num_packets; 2229 switch (udev->speed) { 2230 case USB_SPEED_LOW: 2231 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; 2232 break; 2233 case USB_SPEED_FULL: 2234 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; 2235 break; 2236 case USB_SPEED_HIGH: 2237 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; 2238 break; 2239 case USB_SPEED_SUPER: 2240 case USB_SPEED_UNKNOWN: 2241 case USB_SPEED_WIRELESS: 2242 /* Should never happen because only LS/FS/HS endpoints will get 2243 * added to the endpoint list. 2244 */ 2245 return; 2246 } 2247 2248 if (tt_info) 2249 tt_info->active_eps += 1; 2250 /* Insert the endpoint into the list, largest max packet size first. */ 2251 list_for_each_entry(smaller_ep, &interval_bw->endpoints, 2252 bw_endpoint_list) { 2253 if (ep_bw->max_packet_size >= 2254 smaller_ep->bw_info.max_packet_size) { 2255 /* Add the new ep before the smaller endpoint */ 2256 list_add_tail(&virt_ep->bw_endpoint_list, 2257 &smaller_ep->bw_endpoint_list); 2258 return; 2259 } 2260 } 2261 /* Add the new endpoint at the end of the list. 
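 * (It has the smallest max packet size seen so far for this interval, so it
 * belongs after every endpoint already on the list.)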
*/ 2262 list_add_tail(&virt_ep->bw_endpoint_list, 2263 &interval_bw->endpoints); 2264} 2265 2266void xhci_update_tt_active_eps(struct xhci_hcd *xhci, 2267 struct xhci_virt_device *virt_dev, 2268 int old_active_eps) 2269{ 2270 struct xhci_root_port_bw_info *rh_bw_info; 2271 if (!virt_dev->tt_info) 2272 return; 2273 2274 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; 2275 if (old_active_eps == 0 && 2276 virt_dev->tt_info->active_eps != 0) { 2277 rh_bw_info->num_active_tts += 1; 2278 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; 2279 } else if (old_active_eps != 0 && 2280 virt_dev->tt_info->active_eps == 0) { 2281 rh_bw_info->num_active_tts -= 1; 2282 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; 2283 } 2284} 2285 2286static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, 2287 struct xhci_virt_device *virt_dev, 2288 struct xhci_container_ctx *in_ctx) 2289{ 2290 struct xhci_bw_info ep_bw_info[31]; 2291 int i; 2292 struct xhci_input_control_ctx *ctrl_ctx; 2293 int old_active_eps = 0; 2294 2295 if (virt_dev->tt_info) 2296 old_active_eps = virt_dev->tt_info->active_eps; 2297 2298 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 2299 2300 for (i = 0; i < 31; i++) { 2301 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2302 continue; 2303 2304 /* Make a copy of the BW info in case we need to revert this */ 2305 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, 2306 sizeof(ep_bw_info[i])); 2307 /* Drop the endpoint from the interval table if the endpoint is 2308 * being dropped or changed. 2309 */ 2310 if (EP_IS_DROPPED(ctrl_ctx, i)) 2311 xhci_drop_ep_from_interval_table(xhci, 2312 &virt_dev->eps[i].bw_info, 2313 virt_dev->bw_table, 2314 virt_dev->udev, 2315 &virt_dev->eps[i], 2316 virt_dev->tt_info); 2317 } 2318 /* Overwrite the information stored in the endpoints' bw_info */ 2319 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); 2320 for (i = 0; i < 31; i++) { 2321 /* Add any changed or added endpoints to the interval table */ 2322 if (EP_IS_ADDED(ctrl_ctx, i)) 2323 xhci_add_ep_to_interval_table(xhci, 2324 &virt_dev->eps[i].bw_info, 2325 virt_dev->bw_table, 2326 virt_dev->udev, 2327 &virt_dev->eps[i], 2328 virt_dev->tt_info); 2329 } 2330 2331 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { 2332 /* Ok, this fits in the bandwidth we have. 2333 * Update the number of active TTs. 2334 */ 2335 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 2336 return 0; 2337 } 2338 2339 /* We don't have enough bandwidth for this, revert the stored info. */ 2340 for (i = 0; i < 31; i++) { 2341 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2342 continue; 2343 2344 /* Drop the new copies of any added or changed endpoints from 2345 * the interval table. 2346 */ 2347 if (EP_IS_ADDED(ctrl_ctx, i)) { 2348 xhci_drop_ep_from_interval_table(xhci, 2349 &virt_dev->eps[i].bw_info, 2350 virt_dev->bw_table, 2351 virt_dev->udev, 2352 &virt_dev->eps[i], 2353 virt_dev->tt_info); 2354 } 2355 /* Revert the endpoint back to its old information */ 2356 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], 2357 sizeof(ep_bw_info[i])); 2358 /* Add any changed or dropped endpoints back into the table */ 2359 if (EP_IS_DROPPED(ctrl_ctx, i)) 2360 xhci_add_ep_to_interval_table(xhci, 2361 &virt_dev->eps[i].bw_info, 2362 virt_dev->bw_table, 2363 virt_dev->udev, 2364 &virt_dev->eps[i], 2365 virt_dev->tt_info); 2366 } 2367 return -ENOMEM; 2368} 2369 2370 2371/* Issue a configure endpoint command or evaluate context command 2372 * and wait for it to finish. 
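 * This function sleeps on the command completion, so it must be called in
 * process context without xhci->lock held; the lock is taken and released
 * internally around the command ring manipulation.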
2373 */ 2374static int xhci_configure_endpoint(struct xhci_hcd *xhci, 2375 struct usb_device *udev, 2376 struct xhci_command *command, 2377 bool ctx_change, bool must_succeed) 2378{ 2379 int ret; 2380 int timeleft; 2381 unsigned long flags; 2382 struct xhci_container_ctx *in_ctx; 2383 struct completion *cmd_completion; 2384 u32 *cmd_status; 2385 struct xhci_virt_device *virt_dev; 2386 2387 spin_lock_irqsave(&xhci->lock, flags); 2388 virt_dev = xhci->devs[udev->slot_id]; 2389 2390 if (command) 2391 in_ctx = command->in_ctx; 2392 else 2393 in_ctx = virt_dev->in_ctx; 2394 2395 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 2396 xhci_reserve_host_resources(xhci, in_ctx)) { 2397 spin_unlock_irqrestore(&xhci->lock, flags); 2398 xhci_warn(xhci, "Not enough host resources, " 2399 "active endpoint contexts = %u\n", 2400 xhci->num_active_eps); 2401 return -ENOMEM; 2402 } 2403 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && 2404 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) { 2405 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2406 xhci_free_host_resources(xhci, in_ctx); 2407 spin_unlock_irqrestore(&xhci->lock, flags); 2408 xhci_warn(xhci, "Not enough bandwidth\n"); 2409 return -ENOMEM; 2410 } 2411 2412 if (command) { 2413 cmd_completion = command->completion; 2414 cmd_status = &command->status; 2415 command->command_trb = xhci->cmd_ring->enqueue; 2416 2417 /* Enqueue pointer can be left pointing to the link TRB, 2418 * we must handle that 2419 */ 2420 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) 2421 command->command_trb = 2422 xhci->cmd_ring->enq_seg->next->trbs; 2423 2424 list_add_tail(&command->cmd_list, &virt_dev->cmd_list); 2425 } else { 2426 cmd_completion = &virt_dev->cmd_completion; 2427 cmd_status = &virt_dev->cmd_status; 2428 } 2429 init_completion(cmd_completion); 2430 2431 if (!ctx_change) 2432 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 2433 udev->slot_id, must_succeed); 2434 else 2435 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, 2436 udev->slot_id); 2437 if (ret < 0) { 2438 if (command) 2439 list_del(&command->cmd_list); 2440 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2441 xhci_free_host_resources(xhci, in_ctx); 2442 spin_unlock_irqrestore(&xhci->lock, flags); 2443 xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); 2444 return -ENOMEM; 2445 } 2446 xhci_ring_cmd_db(xhci); 2447 spin_unlock_irqrestore(&xhci->lock, flags); 2448 2449 /* Wait for the configure endpoint command to complete */ 2450 timeleft = wait_for_completion_interruptible_timeout( 2451 cmd_completion, 2452 USB_CTRL_SET_TIMEOUT); 2453 if (timeleft <= 0) { 2454 xhci_warn(xhci, "%s while waiting for %s command\n", 2455 timeleft == 0 ? "Timeout" : "Signal", 2456 ctx_change == 0 ? 2457 "configure endpoint" : 2458 "evaluate context"); 2459 /* FIXME cancel the configure endpoint command */ 2460 return -ETIME; 2461 } 2462 2463 if (!ctx_change) 2464 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); 2465 else 2466 ret = xhci_evaluate_context_result(xhci, udev, cmd_status); 2467 2468 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2469 spin_lock_irqsave(&xhci->lock, flags); 2470 /* If the command failed, remove the reserved resources. 2471 * Otherwise, clean up the estimate to include dropped eps. 2472 */ 2473 if (ret) 2474 xhci_free_host_resources(xhci, in_ctx); 2475 else 2476 xhci_finish_resource_reservation(xhci, in_ctx); 2477 spin_unlock_irqrestore(&xhci->lock, flags); 2478 } 2479 return ret; 2480} 2481 2482/* Called after one or more calls to xhci_add_endpoint() or 2483 * xhci_drop_endpoint(). 
If this call fails, the USB core is expected 2484 * to call xhci_reset_bandwidth(). 2485 * 2486 * Since we are in the middle of changing either configuration or 2487 * installing a new alt setting, the USB core won't allow URBs to be 2488 * enqueued for any endpoint on the old config or interface. Nothing 2489 * else should be touching the xhci->devs[slot_id] structure, so we 2490 * don't need to take the xhci->lock for manipulating that. 2491 */ 2492int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2493{ 2494 int i; 2495 int ret = 0; 2496 struct xhci_hcd *xhci; 2497 struct xhci_virt_device *virt_dev; 2498 struct xhci_input_control_ctx *ctrl_ctx; 2499 struct xhci_slot_ctx *slot_ctx; 2500 2501 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2502 if (ret <= 0) 2503 return ret; 2504 xhci = hcd_to_xhci(hcd); 2505 if (xhci->xhc_state & XHCI_STATE_DYING) 2506 return -ENODEV; 2507 2508 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2509 virt_dev = xhci->devs[udev->slot_id]; 2510 2511 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 2512 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 2513 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2514 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 2515 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 2516 2517 /* Don't issue the command if there's no endpoints to update. */ 2518 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && 2519 ctrl_ctx->drop_flags == 0) 2520 return 0; 2521 2522 xhci_dbg(xhci, "New Input Control Context:\n"); 2523 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 2524 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2525 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); 2526 2527 ret = xhci_configure_endpoint(xhci, udev, NULL, 2528 false, false); 2529 if (ret) { 2530 /* Callee should call reset_bandwidth() */ 2531 return ret; 2532 } 2533 2534 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); 2535 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2536 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); 2537 2538 /* Free any rings that were dropped, but not changed. */ 2539 for (i = 1; i < 31; ++i) { 2540 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && 2541 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) 2542 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2543 } 2544 xhci_zero_in_ctx(xhci, virt_dev); 2545 /* 2546 * Install any rings for completely new endpoints or changed endpoints, 2547 * and free or cache any old rings from changed endpoints. 2548 */ 2549 for (i = 1; i < 31; ++i) { 2550 if (!virt_dev->eps[i].new_ring) 2551 continue; 2552 /* Only cache or free the old ring if it exists. 2553 * It may not if this is the first add of an endpoint. 
2554 */ 2555 if (virt_dev->eps[i].ring) { 2556 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2557 } 2558 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 2559 virt_dev->eps[i].new_ring = NULL; 2560 } 2561 2562 return ret; 2563} 2564 2565void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2566{ 2567 struct xhci_hcd *xhci; 2568 struct xhci_virt_device *virt_dev; 2569 int i, ret; 2570 2571 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2572 if (ret <= 0) 2573 return; 2574 xhci = hcd_to_xhci(hcd); 2575 2576 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2577 virt_dev = xhci->devs[udev->slot_id]; 2578 /* Free any rings allocated for added endpoints */ 2579 for (i = 0; i < 31; ++i) { 2580 if (virt_dev->eps[i].new_ring) { 2581 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); 2582 virt_dev->eps[i].new_ring = NULL; 2583 } 2584 } 2585 xhci_zero_in_ctx(xhci, virt_dev); 2586} 2587 2588static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, 2589 struct xhci_container_ctx *in_ctx, 2590 struct xhci_container_ctx *out_ctx, 2591 u32 add_flags, u32 drop_flags) 2592{ 2593 struct xhci_input_control_ctx *ctrl_ctx; 2594 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 2595 ctrl_ctx->add_flags = cpu_to_le32(add_flags); 2596 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); 2597 xhci_slot_copy(xhci, in_ctx, out_ctx); 2598 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2599 2600 xhci_dbg(xhci, "Input Context:\n"); 2601 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 2602} 2603 2604static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 2605 unsigned int slot_id, unsigned int ep_index, 2606 struct xhci_dequeue_state *deq_state) 2607{ 2608 struct xhci_container_ctx *in_ctx; 2609 struct xhci_ep_ctx *ep_ctx; 2610 u32 added_ctxs; 2611 dma_addr_t addr; 2612 2613 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, 2614 xhci->devs[slot_id]->out_ctx, ep_index); 2615 in_ctx = xhci->devs[slot_id]->in_ctx; 2616 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 2617 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, 2618 deq_state->new_deq_ptr); 2619 if (addr == 0) { 2620 xhci_warn(xhci, "WARN Cannot submit config ep after " 2621 "reset ep command\n"); 2622 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", 2623 deq_state->new_deq_seg, 2624 deq_state->new_deq_ptr); 2625 return; 2626 } 2627 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); 2628 2629 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); 2630 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, 2631 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); 2632} 2633 2634void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, 2635 struct usb_device *udev, unsigned int ep_index) 2636{ 2637 struct xhci_dequeue_state deq_state; 2638 struct xhci_virt_ep *ep; 2639 2640 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); 2641 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2642 /* We need to move the HW's dequeue pointer past this TD, 2643 * or it will attempt to resend it on the next doorbell ring. 2644 */ 2645 xhci_find_new_dequeue_state(xhci, udev->slot_id, 2646 ep_index, ep->stopped_stream, ep->stopped_td, 2647 &deq_state); 2648 2649 /* HW with the reset endpoint quirk will use the saved dequeue state to 2650 * issue a configure endpoint command later. 
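 * For hosts without the quirk, a Set TR Dequeue Pointer command is queued
 * right away instead.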
2651 */ 2652 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 2653 xhci_dbg(xhci, "Queueing new dequeue state\n"); 2654 xhci_queue_new_dequeue_state(xhci, udev->slot_id, 2655 ep_index, ep->stopped_stream, &deq_state); 2656 } else { 2657 /* Better hope no one uses the input context between now and the 2658 * reset endpoint completion! 2659 * XXX: No idea how this hardware will react when stream rings 2660 * are enabled. 2661 */ 2662 xhci_dbg(xhci, "Setting up input context for " 2663 "configure endpoint command\n"); 2664 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, 2665 ep_index, &deq_state); 2666 } 2667} 2668 2669/* Deal with stalled endpoints. The core should have sent the control message 2670 * to clear the halt condition. However, we need to make the xHCI hardware 2671 * reset its sequence number, since a device will expect a sequence number of 2672 * zero after the halt condition is cleared. 2673 * Context: in_interrupt 2674 */ 2675void xhci_endpoint_reset(struct usb_hcd *hcd, 2676 struct usb_host_endpoint *ep) 2677{ 2678 struct xhci_hcd *xhci; 2679 struct usb_device *udev; 2680 unsigned int ep_index; 2681 unsigned long flags; 2682 int ret; 2683 struct xhci_virt_ep *virt_ep; 2684 2685 xhci = hcd_to_xhci(hcd); 2686 udev = (struct usb_device *) ep->hcpriv; 2687 /* Called with a root hub endpoint (or an endpoint that wasn't added 2688 * with xhci_add_endpoint() 2689 */ 2690 if (!ep->hcpriv) 2691 return; 2692 ep_index = xhci_get_endpoint_index(&ep->desc); 2693 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2694 if (!virt_ep->stopped_td) { 2695 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", 2696 ep->desc.bEndpointAddress); 2697 return; 2698 } 2699 if (usb_endpoint_xfer_control(&ep->desc)) { 2700 xhci_dbg(xhci, "Control endpoint stall already handled.\n"); 2701 return; 2702 } 2703 2704 xhci_dbg(xhci, "Queueing reset endpoint command\n"); 2705 spin_lock_irqsave(&xhci->lock, flags); 2706 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); 2707 /* 2708 * Can't change the ring dequeue pointer until it's transitioned to the 2709 * stopped state, which is only upon a successful reset endpoint 2710 * command. Better hope that last command worked! 
2711 */ 2712 if (!ret) { 2713 xhci_cleanup_stalled_ring(xhci, udev, ep_index); 2714 kfree(virt_ep->stopped_td); 2715 xhci_ring_cmd_db(xhci); 2716 } 2717 virt_ep->stopped_td = NULL; 2718 virt_ep->stopped_trb = NULL; 2719 virt_ep->stopped_stream = 0; 2720 spin_unlock_irqrestore(&xhci->lock, flags); 2721 2722 if (ret) 2723 xhci_warn(xhci, "FIXME allocate a new ring segment\n"); 2724} 2725 2726static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2727 struct usb_device *udev, struct usb_host_endpoint *ep, 2728 unsigned int slot_id) 2729{ 2730 int ret; 2731 unsigned int ep_index; 2732 unsigned int ep_state; 2733 2734 if (!ep) 2735 return -EINVAL; 2736 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); 2737 if (ret <= 0) 2738 return -EINVAL; 2739 if (ep->ss_ep_comp.bmAttributes == 0) { 2740 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" 2741 " descriptor for ep 0x%x does not support streams\n", 2742 ep->desc.bEndpointAddress); 2743 return -EINVAL; 2744 } 2745 2746 ep_index = xhci_get_endpoint_index(&ep->desc); 2747 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 2748 if (ep_state & EP_HAS_STREAMS || 2749 ep_state & EP_GETTING_STREAMS) { 2750 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " 2751 "already has streams set up.\n", 2752 ep->desc.bEndpointAddress); 2753 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " 2754 "dynamic stream context array reallocation.\n"); 2755 return -EINVAL; 2756 } 2757 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { 2758 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " 2759 "endpoint 0x%x; URBs are pending.\n", 2760 ep->desc.bEndpointAddress); 2761 return -EINVAL; 2762 } 2763 return 0; 2764} 2765 2766static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, 2767 unsigned int *num_streams, unsigned int *num_stream_ctxs) 2768{ 2769 unsigned int max_streams; 2770 2771 /* The stream context array size must be a power of two */ 2772 *num_stream_ctxs = roundup_pow_of_two(*num_streams); 2773 /* 2774 * Find out how many primary stream array entries the host controller 2775 * supports. Later we may use secondary stream arrays (similar to 2nd 2776 * level page entries), but that's an optional feature for xHCI host 2777 * controllers. xHCs must support at least 4 stream IDs. 2778 */ 2779 max_streams = HCC_MAX_PSA(xhci->hcc_params); 2780 if (*num_stream_ctxs > max_streams) { 2781 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", 2782 max_streams); 2783 *num_stream_ctxs = max_streams; 2784 *num_streams = max_streams; 2785 } 2786} 2787 2788/* Returns an error code if one of the endpoint already has streams. 2789 * This does not change any data structures, it only checks and gathers 2790 * information. 
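 * On return, *num_streams is clamped to what the most constrained endpoint can
 * support (its companion descriptor limit plus stream 0), *changed_ep_bitmask
 * has one add-flag bit set per endpoint, and a duplicate endpoint in the array
 * is reported as -EINVAL.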
2791 */ 2792static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, 2793 struct usb_device *udev, 2794 struct usb_host_endpoint **eps, unsigned int num_eps, 2795 unsigned int *num_streams, u32 *changed_ep_bitmask) 2796{ 2797 unsigned int max_streams; 2798 unsigned int endpoint_flag; 2799 int i; 2800 int ret; 2801 2802 for (i = 0; i < num_eps; i++) { 2803 ret = xhci_check_streams_endpoint(xhci, udev, 2804 eps[i], udev->slot_id); 2805 if (ret < 0) 2806 return ret; 2807 2808 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); 2809 if (max_streams < (*num_streams - 1)) { 2810 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", 2811 eps[i]->desc.bEndpointAddress, 2812 max_streams); 2813 *num_streams = max_streams+1; 2814 } 2815 2816 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); 2817 if (*changed_ep_bitmask & endpoint_flag) 2818 return -EINVAL; 2819 *changed_ep_bitmask |= endpoint_flag; 2820 } 2821 return 0; 2822} 2823 2824static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, 2825 struct usb_device *udev, 2826 struct usb_host_endpoint **eps, unsigned int num_eps) 2827{ 2828 u32 changed_ep_bitmask = 0; 2829 unsigned int slot_id; 2830 unsigned int ep_index; 2831 unsigned int ep_state; 2832 int i; 2833 2834 slot_id = udev->slot_id; 2835 if (!xhci->devs[slot_id]) 2836 return 0; 2837 2838 for (i = 0; i < num_eps; i++) { 2839 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2840 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 2841 /* Are streams already being freed for the endpoint? */ 2842 if (ep_state & EP_GETTING_NO_STREAMS) { 2843 xhci_warn(xhci, "WARN Can't disable streams for " 2844 "endpoint 0x%x\n, " 2845 "streams are being disabled already.", 2846 eps[i]->desc.bEndpointAddress); 2847 return 0; 2848 } 2849 /* Are there actually any streams to free? */ 2850 if (!(ep_state & EP_HAS_STREAMS) && 2851 !(ep_state & EP_GETTING_STREAMS)) { 2852 xhci_warn(xhci, "WARN Can't disable streams for " 2853 "endpoint 0x%x\n, " 2854 "streams are already disabled!", 2855 eps[i]->desc.bEndpointAddress); 2856 xhci_warn(xhci, "WARN xhci_free_streams() called " 2857 "with non-streams endpoint\n"); 2858 return 0; 2859 } 2860 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); 2861 } 2862 return changed_ep_bitmask; 2863} 2864 2865/* 2866 * The USB device drivers use this function (though the HCD interface in USB 2867 * core) to prepare a set of bulk endpoints to use streams. Streams are used to 2868 * coordinate mass storage command queueing across multiple endpoints (basically 2869 * a stream ID == a task ID). 2870 * 2871 * Setting up streams involves allocating the same size stream context array 2872 * for each endpoint and issuing a configure endpoint command for all endpoints. 2873 * 2874 * Don't allow the call to succeed if one endpoint only supports one stream 2875 * (which means it doesn't support streams at all). 2876 * 2877 * Drivers may get less stream IDs than they asked for, if the host controller 2878 * hardware or endpoints claim they can't support the number of requested 2879 * stream IDs. 
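 *
 * A hypothetical caller, sketched for illustration only (the endpoint
 * pointers and counts below are made up, and the call normally arrives here
 * through the USB core's usb_alloc_streams() wrapper rather than directly):
 *
 *	struct usb_host_endpoint *eps[2] = { bulk_in, bulk_out };
 *	int streams = usb_alloc_streams(intf, eps, 2, 16, GFP_NOIO);
 *
 * On success the driver may use stream IDs 1 through the returned count by
 * setting urb->stream_id before submitting each URB.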
2880 */ 2881int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 2882 struct usb_host_endpoint **eps, unsigned int num_eps, 2883 unsigned int num_streams, gfp_t mem_flags) 2884{ 2885 int i, ret; 2886 struct xhci_hcd *xhci; 2887 struct xhci_virt_device *vdev; 2888 struct xhci_command *config_cmd; 2889 unsigned int ep_index; 2890 unsigned int num_stream_ctxs; 2891 unsigned long flags; 2892 u32 changed_ep_bitmask = 0; 2893 2894 if (!eps) 2895 return -EINVAL; 2896 2897 /* Add one to the number of streams requested to account for 2898 * stream 0 that is reserved for xHCI usage. 2899 */ 2900 num_streams += 1; 2901 xhci = hcd_to_xhci(hcd); 2902 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 2903 num_streams); 2904 2905 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 2906 if (!config_cmd) { 2907 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 2908 return -ENOMEM; 2909 } 2910 2911 /* Check to make sure all endpoints are not already configured for 2912 * streams. While we're at it, find the maximum number of streams that 2913 * all the endpoints will support and check for duplicate endpoints. 2914 */ 2915 spin_lock_irqsave(&xhci->lock, flags); 2916 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 2917 num_eps, &num_streams, &changed_ep_bitmask); 2918 if (ret < 0) { 2919 xhci_free_command(xhci, config_cmd); 2920 spin_unlock_irqrestore(&xhci->lock, flags); 2921 return ret; 2922 } 2923 if (num_streams <= 1) { 2924 xhci_warn(xhci, "WARN: endpoints can't handle " 2925 "more than one stream.\n"); 2926 xhci_free_command(xhci, config_cmd); 2927 spin_unlock_irqrestore(&xhci->lock, flags); 2928 return -EINVAL; 2929 } 2930 vdev = xhci->devs[udev->slot_id]; 2931 /* Mark each endpoint as being in transition, so 2932 * xhci_urb_enqueue() will reject all URBs. 2933 */ 2934 for (i = 0; i < num_eps; i++) { 2935 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2936 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 2937 } 2938 spin_unlock_irqrestore(&xhci->lock, flags); 2939 2940 /* Setup internal data structures and allocate HW data structures for 2941 * streams (but don't install the HW structures in the input context 2942 * until we're sure all memory allocation succeeded). 2943 */ 2944 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 2945 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 2946 num_stream_ctxs, num_streams); 2947 2948 for (i = 0; i < num_eps; i++) { 2949 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2950 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 2951 num_stream_ctxs, 2952 num_streams, mem_flags); 2953 if (!vdev->eps[ep_index].stream_info) 2954 goto cleanup; 2955 /* Set maxPstreams in endpoint context and update deq ptr to 2956 * point to stream context array. FIXME 2957 */ 2958 } 2959 2960 /* Set up the input context for a configure endpoint command. */ 2961 for (i = 0; i < num_eps; i++) { 2962 struct xhci_ep_ctx *ep_ctx; 2963 2964 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2965 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); 2966 2967 xhci_endpoint_copy(xhci, config_cmd->in_ctx, 2968 vdev->out_ctx, ep_index); 2969 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, 2970 vdev->eps[ep_index].stream_info); 2971 } 2972 /* Tell the HW to drop its old copy of the endpoint context info 2973 * and add the updated copy from the input context. 
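 * (Passing changed_ep_bitmask as both the add and the drop flags below does
 * exactly that.)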
2974 */ 2975 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, 2976 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); 2977 2978 /* Issue and wait for the configure endpoint command */ 2979 ret = xhci_configure_endpoint(xhci, udev, config_cmd, 2980 false, false); 2981 2982 /* xHC rejected the configure endpoint command for some reason, so we 2983 * leave the old ring intact and free our internal streams data 2984 * structure. 2985 */ 2986 if (ret < 0) 2987 goto cleanup; 2988 2989 spin_lock_irqsave(&xhci->lock, flags); 2990 for (i = 0; i < num_eps; i++) { 2991 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2992 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 2993 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", 2994 udev->slot_id, ep_index); 2995 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; 2996 } 2997 xhci_free_command(xhci, config_cmd); 2998 spin_unlock_irqrestore(&xhci->lock, flags); 2999 3000 /* Subtract 1 for stream 0, which drivers can't use */ 3001 return num_streams - 1; 3002 3003cleanup: 3004 /* If it didn't work, free the streams! */ 3005 for (i = 0; i < num_eps; i++) { 3006 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3007 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3008 vdev->eps[ep_index].stream_info = NULL; 3009 /* FIXME Unset maxPstreams in endpoint context and 3010 * update deq ptr to point to normal string ring. 3011 */ 3012 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3013 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3014 xhci_endpoint_zero(xhci, vdev, eps[i]); 3015 } 3016 xhci_free_command(xhci, config_cmd); 3017 return -ENOMEM; 3018} 3019 3020/* Transition the endpoint from using streams to being a "normal" endpoint 3021 * without streams. 3022 * 3023 * Modify the endpoint context state, submit a configure endpoint command, 3024 * and free all endpoint rings for streams if that completes successfully. 3025 */ 3026int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, 3027 struct usb_host_endpoint **eps, unsigned int num_eps, 3028 gfp_t mem_flags) 3029{ 3030 int i, ret; 3031 struct xhci_hcd *xhci; 3032 struct xhci_virt_device *vdev; 3033 struct xhci_command *command; 3034 unsigned int ep_index; 3035 unsigned long flags; 3036 u32 changed_ep_bitmask; 3037 3038 xhci = hcd_to_xhci(hcd); 3039 vdev = xhci->devs[udev->slot_id]; 3040 3041 /* Set up a configure endpoint command to remove the streams rings */ 3042 spin_lock_irqsave(&xhci->lock, flags); 3043 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, 3044 udev, eps, num_eps); 3045 if (changed_ep_bitmask == 0) { 3046 spin_unlock_irqrestore(&xhci->lock, flags); 3047 return -EINVAL; 3048 } 3049 3050 /* Use the xhci_command structure from the first endpoint. We may have 3051 * allocated too many, but the driver may call xhci_free_streams() for 3052 * each endpoint it grouped into one call to xhci_alloc_streams(). 
3053 */ 3054 ep_index = xhci_get_endpoint_index(&eps[0]->desc); 3055 command = vdev->eps[ep_index].stream_info->free_streams_command; 3056 for (i = 0; i < num_eps; i++) { 3057 struct xhci_ep_ctx *ep_ctx; 3058 3059 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3060 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); 3061 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= 3062 EP_GETTING_NO_STREAMS; 3063 3064 xhci_endpoint_copy(xhci, command->in_ctx, 3065 vdev->out_ctx, ep_index); 3066 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, 3067 &vdev->eps[ep_index]); 3068 } 3069 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, 3070 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); 3071 spin_unlock_irqrestore(&xhci->lock, flags); 3072 3073 /* Issue and wait for the configure endpoint command, 3074 * which must succeed. 3075 */ 3076 ret = xhci_configure_endpoint(xhci, udev, command, 3077 false, true); 3078 3079 /* xHC rejected the configure endpoint command for some reason, so we 3080 * leave the streams rings intact. 3081 */ 3082 if (ret < 0) 3083 return ret; 3084 3085 spin_lock_irqsave(&xhci->lock, flags); 3086 for (i = 0; i < num_eps; i++) { 3087 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3088 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3089 vdev->eps[ep_index].stream_info = NULL; 3090 /* FIXME Unset maxPstreams in endpoint context and 3091 * update deq ptr to point to normal string ring. 3092 */ 3093 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; 3094 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3095 } 3096 spin_unlock_irqrestore(&xhci->lock, flags); 3097 3098 return 0; 3099} 3100 3101/* 3102 * Deletes endpoint resources for endpoints that were active before a Reset 3103 * Device command, or a Disable Slot command. The Reset Device command leaves 3104 * the control endpoint intact, whereas the Disable Slot command deletes it. 3105 * 3106 * Must be called with xhci->lock held. 3107 */ 3108void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, 3109 struct xhci_virt_device *virt_dev, bool drop_control_ep) 3110{ 3111 int i; 3112 unsigned int num_dropped_eps = 0; 3113 unsigned int drop_flags = 0; 3114 3115 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { 3116 if (virt_dev->eps[i].ring) { 3117 drop_flags |= 1 << i; 3118 num_dropped_eps++; 3119 } 3120 } 3121 xhci->num_active_eps -= num_dropped_eps; 3122 if (num_dropped_eps) 3123 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " 3124 "%u now active.\n", 3125 num_dropped_eps, drop_flags, 3126 xhci->num_active_eps); 3127} 3128 3129/* 3130 * This submits a Reset Device Command, which will set the device state to 0, 3131 * set the device address to 0, and disable all the endpoints except the default 3132 * control endpoint. The USB core should come back and call 3133 * xhci_address_device(), and then re-set up the configuration. If this is 3134 * called because of a usb_reset_and_verify_device(), then the old alternate 3135 * settings will be re-installed through the normal bandwidth allocation 3136 * functions. 3137 * 3138 * Wait for the Reset Device command to finish. Remove all structures 3139 * associated with the endpoints that were disabled. Clear the input device 3140 * structure? Cache the rings? Reset the control endpoint 0 max packet size? 3141 * 3142 * If the virt_dev to be reset does not exist or does not match the udev, 3143 * it means the device is lost, possibly due to the xHC restore error and 3144 * re-initialization during S3/S4. 
In this case, call xhci_alloc_dev() to 3145 * re-allocate the device. 3146 */ 3147int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) 3148{ 3149 int ret, i; 3150 unsigned long flags; 3151 struct xhci_hcd *xhci; 3152 unsigned int slot_id; 3153 struct xhci_virt_device *virt_dev; 3154 struct xhci_command *reset_device_cmd; 3155 int timeleft; 3156 int last_freed_endpoint; 3157 struct xhci_slot_ctx *slot_ctx; 3158 int old_active_eps = 0; 3159 3160 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 3161 if (ret <= 0) 3162 return ret; 3163 xhci = hcd_to_xhci(hcd); 3164 slot_id = udev->slot_id; 3165 virt_dev = xhci->devs[slot_id]; 3166 if (!virt_dev) { 3167 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3168 "not exist. Re-allocate the device\n", slot_id); 3169 ret = xhci_alloc_dev(hcd, udev); 3170 if (ret == 1) 3171 return 0; 3172 else 3173 return -EINVAL; 3174 } 3175 3176 if (virt_dev->udev != udev) { 3177 /* If the virt_dev and the udev does not match, this virt_dev 3178 * may belong to another udev. 3179 * Re-allocate the device. 3180 */ 3181 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3182 "not match the udev. Re-allocate the device\n", 3183 slot_id); 3184 ret = xhci_alloc_dev(hcd, udev); 3185 if (ret == 1) 3186 return 0; 3187 else 3188 return -EINVAL; 3189 } 3190 3191 /* If device is not setup, there is no point in resetting it */ 3192 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3193 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3194 SLOT_STATE_DISABLED) 3195 return 0; 3196 3197 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 3198 /* Allocate the command structure that holds the struct completion. 3199 * Assume we're in process context, since the normal device reset 3200 * process has to wait for the device anyway. Storage devices are 3201 * reset as part of error handling, so use GFP_NOIO instead of 3202 * GFP_KERNEL. 3203 */ 3204 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); 3205 if (!reset_device_cmd) { 3206 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 3207 return -ENOMEM; 3208 } 3209 3210 /* Attempt to submit the Reset Device command to the command ring */ 3211 spin_lock_irqsave(&xhci->lock, flags); 3212 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; 3213 3214 /* Enqueue pointer can be left pointing to the link TRB, 3215 * we must handle that 3216 */ 3217 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control)) 3218 reset_device_cmd->command_trb = 3219 xhci->cmd_ring->enq_seg->next->trbs; 3220 3221 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); 3222 ret = xhci_queue_reset_device(xhci, slot_id); 3223 if (ret) { 3224 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3225 list_del(&reset_device_cmd->cmd_list); 3226 spin_unlock_irqrestore(&xhci->lock, flags); 3227 goto command_cleanup; 3228 } 3229 xhci_ring_cmd_db(xhci); 3230 spin_unlock_irqrestore(&xhci->lock, flags); 3231 3232 /* Wait for the Reset Device command to finish */ 3233 timeleft = wait_for_completion_interruptible_timeout( 3234 reset_device_cmd->completion, 3235 USB_CTRL_SET_TIMEOUT); 3236 if (timeleft <= 0) { 3237 xhci_warn(xhci, "%s while waiting for reset device command\n", 3238 timeleft == 0 ? "Timeout" : "Signal"); 3239 spin_lock_irqsave(&xhci->lock, flags); 3240 /* The timeout might have raced with the event ring handler, so 3241 * only delete from the list if the item isn't poisoned. 
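 * (list_del() poisons the entry by setting its next pointer to LIST_POISON1,
 * so a poisoned entry means the completion handler already removed this
 * command from the list.)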
3242 */ 3243 if (reset_device_cmd->cmd_list.next != LIST_POISON1) 3244 list_del(&reset_device_cmd->cmd_list); 3245 spin_unlock_irqrestore(&xhci->lock, flags); 3246 ret = -ETIME; 3247 goto command_cleanup; 3248 } 3249 3250 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 3251 * unless we tried to reset a slot ID that wasn't enabled, 3252 * or the device wasn't in the addressed or configured state. 3253 */ 3254 ret = reset_device_cmd->status; 3255 switch (ret) { 3256 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ 3257 case COMP_CTX_STATE: /* 0.96 completion code for same thing */ 3258 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", 3259 slot_id, 3260 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 3261 xhci_info(xhci, "Not freeing device rings.\n"); 3262 /* Don't treat this as an error. May change my mind later. */ 3263 ret = 0; 3264 goto command_cleanup; 3265 case COMP_SUCCESS: 3266 xhci_dbg(xhci, "Successful reset device command.\n"); 3267 break; 3268 default: 3269 if (xhci_is_vendor_info_code(xhci, ret)) 3270 break; 3271 xhci_warn(xhci, "Unknown completion code %u for " 3272 "reset device command.\n", ret); 3273 ret = -EINVAL; 3274 goto command_cleanup; 3275 } 3276 3277 /* Free up host controller endpoint resources */ 3278 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3279 spin_lock_irqsave(&xhci->lock, flags); 3280 /* Don't delete the default control endpoint resources */ 3281 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 3282 spin_unlock_irqrestore(&xhci->lock, flags); 3283 } 3284 3285 /* Everything but endpoint 0 is disabled, so free or cache the rings. */ 3286 last_freed_endpoint = 1; 3287 for (i = 1; i < 31; ++i) { 3288 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 3289 3290 if (ep->ep_state & EP_HAS_STREAMS) { 3291 xhci_free_stream_info(xhci, ep->stream_info); 3292 ep->stream_info = NULL; 3293 ep->ep_state &= ~EP_HAS_STREAMS; 3294 } 3295 3296 if (ep->ring) { 3297 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 3298 last_freed_endpoint = i; 3299 } 3300 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 3301 xhci_drop_ep_from_interval_table(xhci, 3302 &virt_dev->eps[i].bw_info, 3303 virt_dev->bw_table, 3304 udev, 3305 &virt_dev->eps[i], 3306 virt_dev->tt_info); 3307 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 3308 } 3309 /* If necessary, update the number of active TTs on this root port */ 3310 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 3311 3312 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 3313 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 3314 ret = 0; 3315 3316command_cleanup: 3317 xhci_free_command(xhci, reset_device_cmd); 3318 return ret; 3319} 3320 3321/* 3322 * At this point, the struct usb_device is about to go away, the device has 3323 * disconnected, and all traffic has been stopped and the endpoints have been 3324 * disabled. Free any HC data structures associated with that device. 3325 */ 3326void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 3327{ 3328 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3329 struct xhci_virt_device *virt_dev; 3330 unsigned long flags; 3331 u32 state; 3332 int i, ret; 3333 3334 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3335 /* If the host is halted due to driver unload, we still need to free the 3336 * device. 
3337 */ 3338 if (ret <= 0 && ret != -ENODEV) 3339 return; 3340 3341 virt_dev = xhci->devs[udev->slot_id]; 3342 3343 /* Stop any wayward timer functions (which may grab the lock) */ 3344 for (i = 0; i < 31; ++i) { 3345 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; 3346 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3347 } 3348 3349 if (udev->usb2_hw_lpm_enabled) { 3350 xhci_set_usb2_hardware_lpm(hcd, udev, 0); 3351 udev->usb2_hw_lpm_enabled = 0; 3352 } 3353 3354 spin_lock_irqsave(&xhci->lock, flags); 3355 /* Don't disable the slot if the host controller is dead. */ 3356 state = xhci_readl(xhci, &xhci->op_regs->status); 3357 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 3358 (xhci->xhc_state & XHCI_STATE_HALTED)) { 3359 xhci_free_virt_device(xhci, udev->slot_id); 3360 spin_unlock_irqrestore(&xhci->lock, flags); 3361 return; 3362 } 3363 3364 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { 3365 spin_unlock_irqrestore(&xhci->lock, flags); 3366 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3367 return; 3368 } 3369 xhci_ring_cmd_db(xhci); 3370 spin_unlock_irqrestore(&xhci->lock, flags); 3371 /* 3372 * Event command completion handler will free any data structures 3373 * associated with the slot. XXX Can free sleep? 3374 */ 3375} 3376 3377/* 3378 * Checks if we have enough host controller resources for the default control 3379 * endpoint. 3380 * 3381 * Must be called with xhci->lock held. 3382 */ 3383static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 3384{ 3385 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 3386 xhci_dbg(xhci, "Not enough ep ctxs: " 3387 "%u active, need to add 1, limit is %u.\n", 3388 xhci->num_active_eps, xhci->limit_active_eps); 3389 return -ENOMEM; 3390 } 3391 xhci->num_active_eps += 1; 3392 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", 3393 xhci->num_active_eps); 3394 return 0; 3395} 3396 3397 3398/* 3399 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 3400 * timed out, or allocating memory failed. Returns 1 on success. 3401 */ 3402int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 3403{ 3404 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3405 unsigned long flags; 3406 int timeleft; 3407 int ret; 3408 3409 spin_lock_irqsave(&xhci->lock, flags); 3410 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 3411 if (ret) { 3412 spin_unlock_irqrestore(&xhci->lock, flags); 3413 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3414 return 0; 3415 } 3416 xhci_ring_cmd_db(xhci); 3417 spin_unlock_irqrestore(&xhci->lock, flags); 3418 3419 /* XXX: how much time for xHC slot assignment? */ 3420 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 3421 USB_CTRL_SET_TIMEOUT); 3422 if (timeleft <= 0) { 3423 xhci_warn(xhci, "%s while waiting for a slot\n", 3424 timeleft == 0 ? 
"Timeout" : "Signal"); 3425 /* FIXME cancel the enable slot request */ 3426 return 0; 3427 } 3428 3429 if (!xhci->slot_id) { 3430 xhci_err(xhci, "Error while assigning device slot ID\n"); 3431 return 0; 3432 } 3433 3434 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3435 spin_lock_irqsave(&xhci->lock, flags); 3436 ret = xhci_reserve_host_control_ep_resources(xhci); 3437 if (ret) { 3438 spin_unlock_irqrestore(&xhci->lock, flags); 3439 xhci_warn(xhci, "Not enough host resources, " 3440 "active endpoint contexts = %u\n", 3441 xhci->num_active_eps); 3442 goto disable_slot; 3443 } 3444 spin_unlock_irqrestore(&xhci->lock, flags); 3445 } 3446 /* Use GFP_NOIO, since this function can be called from 3447 * xhci_discover_or_reset_device(), which may be called as part of 3448 * mass storage driver error handling. 3449 */ 3450 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { 3451 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 3452 goto disable_slot; 3453 } 3454 udev->slot_id = xhci->slot_id; 3455 /* Is this a LS or FS device under a HS hub? */ 3456 /* Hub or peripherial? */ 3457 return 1; 3458 3459disable_slot: 3460 /* Disable slot, if we can do it without mem alloc */ 3461 spin_lock_irqsave(&xhci->lock, flags); 3462 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) 3463 xhci_ring_cmd_db(xhci); 3464 spin_unlock_irqrestore(&xhci->lock, flags); 3465 return 0; 3466} 3467 3468/* 3469 * Issue an Address Device command (which will issue a SetAddress request to 3470 * the device). 3471 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so 3472 * we should only issue and wait on one address command at the same time. 3473 * 3474 * We add one to the device address issued by the hardware because the USB core 3475 * uses address 1 for the root hubs (even though they're not really devices). 3476 */ 3477int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 3478{ 3479 unsigned long flags; 3480 int timeleft; 3481 struct xhci_virt_device *virt_dev; 3482 int ret = 0; 3483 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3484 struct xhci_slot_ctx *slot_ctx; 3485 struct xhci_input_control_ctx *ctrl_ctx; 3486 u64 temp_64; 3487 3488 if (!udev->slot_id) { 3489 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); 3490 return -EINVAL; 3491 } 3492 3493 virt_dev = xhci->devs[udev->slot_id]; 3494 3495 if (WARN_ON(!virt_dev)) { 3496 /* 3497 * In plug/unplug torture test with an NEC controller, 3498 * a zero-dereference was observed once due to virt_dev = 0. 3499 * Print useful debug rather than crash if it is observed again! 3500 */ 3501 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 3502 udev->slot_id); 3503 return -EINVAL; 3504 } 3505 3506 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 3507 /* 3508 * If this is the first Set Address since device plug-in or 3509 * virt_device realloaction after a resume with an xHCI power loss, 3510 * then set up the slot context. 3511 */ 3512 if (!slot_ctx->dev_info) 3513 xhci_setup_addressable_virt_dev(xhci, udev); 3514 /* Otherwise, update the control endpoint ring enqueue pointer. 
*/ 3515 else 3516 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 3517 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 3518 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 3519 ctrl_ctx->drop_flags = 0; 3520 3521 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3522 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3523 3524 spin_lock_irqsave(&xhci->lock, flags); 3525 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, 3526 udev->slot_id); 3527 if (ret) { 3528 spin_unlock_irqrestore(&xhci->lock, flags); 3529 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3530 return ret; 3531 } 3532 xhci_ring_cmd_db(xhci); 3533 spin_unlock_irqrestore(&xhci->lock, flags); 3534 3535 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 3536 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 3537 USB_CTRL_SET_TIMEOUT); 3538 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 3539 * the SetAddress() "recovery interval" required by USB and aborting the 3540 * command on a timeout. 3541 */ 3542 if (timeleft <= 0) { 3543 xhci_warn(xhci, "%s while waiting for address device command\n", 3544 timeleft == 0 ? "Timeout" : "Signal"); 3545 /* FIXME cancel the address device command */ 3546 return -ETIME; 3547 } 3548 3549 switch (virt_dev->cmd_status) { 3550 case COMP_CTX_STATE: 3551 case COMP_EBADSLT: 3552 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", 3553 udev->slot_id); 3554 ret = -EINVAL; 3555 break; 3556 case COMP_TX_ERR: 3557 dev_warn(&udev->dev, "Device not responding to set address.\n"); 3558 ret = -EPROTO; 3559 break; 3560 case COMP_DEV_ERR: 3561 dev_warn(&udev->dev, "ERROR: Incompatible device for address " 3562 "device command.\n"); 3563 ret = -ENODEV; 3564 break; 3565 case COMP_SUCCESS: 3566 xhci_dbg(xhci, "Successful Address Device command\n"); 3567 break; 3568 default: 3569 xhci_err(xhci, "ERROR: unexpected command completion " 3570 "code 0x%x.\n", virt_dev->cmd_status); 3571 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3572 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3573 ret = -EINVAL; 3574 break; 3575 } 3576 if (ret) { 3577 return ret; 3578 } 3579 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 3580 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); 3581 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", 3582 udev->slot_id, 3583 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 3584 (unsigned long long) 3585 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); 3586 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 3587 (unsigned long long)virt_dev->out_ctx->dma); 3588 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3589 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3590 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3591 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3592 /* 3593 * USB core uses address 1 for the roothubs, so we add one to the 3594 * address given back to us by the HC. 3595 */ 3596 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3597 /* Use kernel assigned address for devices; store xHC assigned 3598 * address locally. 
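 *
 * Worked example (assuming DEV_ADDR_MASK covers the low bits of
 * dev_state that hold the hardware-assigned USB address): if the xHC
 * hands back address 1 for the first device on the bus, we record
 *
 *	virt_dev->address = (dev_state & DEV_ADDR_MASK) + 1 = 2
 *
 * because the USB core has already reserved address 1 for the roothub.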
*/
3599 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3600 + 1;
3601 /* Zero the input context control for later use */
3602 ctrl_ctx->add_flags = 0;
3603 ctrl_ctx->drop_flags = 0;
3604
3605 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
3606
3607 return 0;
3608}
3609
3610#ifdef CONFIG_USB_SUSPEND
3611
3612/* BESL to HIRD Encoding array for USB2 LPM */
3613static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3614 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
3615
3616/* Calculate HIRD/BESL for USB2 PORTPMSC */
3617static int xhci_calculate_hird_besl(int u2del, bool use_besl)
3618{
3619 int hird;
3620
3621 if (use_besl) {
3622 for (hird = 0; hird < 16; hird++) {
3623 if (xhci_besl_encoding[hird] >= u2del)
3624 break;
3625 }
3626 } else {
3627 if (u2del <= 50)
3628 hird = 0;
3629 else
3630 hird = (u2del - 51) / 75 + 1;
3631
3632 if (hird > 15)
3633 hird = 15;
3634 }
3635
3636 return hird;
3637}
3638
3639static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3640 struct usb_device *udev)
3641{
3642 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3643 struct dev_info *dev_info;
3644 __le32 __iomem **port_array;
3645 __le32 __iomem *addr, *pm_addr;
3646 u32 temp, dev_id;
3647 unsigned int port_num;
3648 unsigned long flags;
3649 int u2del, hird;
3650 int ret;
3651
3652 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3653 !udev->lpm_capable)
3654 return -EINVAL;
3655
3656 /* we only support LPM for non-hub devices connected to the root hub so far */
3657 if (!udev->parent || udev->parent->parent ||
3658 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3659 return -EINVAL;
3660
3661 spin_lock_irqsave(&xhci->lock, flags);
3662
3663 /* Look for devices in lpm_failed_devs list */
3664 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3665 le16_to_cpu(udev->descriptor.idProduct);
3666 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3667 if (dev_info->dev_id == dev_id) {
3668 ret = -EINVAL;
3669 goto finish;
3670 }
3671 }
3672
3673 port_array = xhci->usb2_ports;
3674 port_num = udev->portnum - 1;
3675
3676 if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3677 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3678 ret = -EINVAL;
3679 goto finish;
3680 }
3681
3682 /*
3683 * Test USB 2.0 software LPM.
3684 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
3685 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
3686 * in the June 2011 errata release.
3687 */
3688 xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3689 /*
3690 * Set L1 Device Slot and HIRD/BESL.
3691 * Check the device's USB 2.0 extension descriptor to determine whether
3692 * HIRD or BESL should be used. See the USB 2.0 LPM errata.
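 *
 * Worked example: with u2del = 400, the BESL branch of
 * xhci_calculate_hird_besl() picks the first xhci_besl_encoding[] entry
 * that is >= 400, i.e. index 4, so hird = 4; the plain HIRD branch
 * computes (400 - 51) / 75 + 1 = 5 (and clamps anything larger to 15).
 * The result is programmed into the port power management register via
 * PORT_HIRD() just below.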
3693 */ 3694 pm_addr = port_array[port_num] + 1; 3695 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 3696 if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2)) 3697 hird = xhci_calculate_hird_besl(u2del, 1); 3698 else 3699 hird = xhci_calculate_hird_besl(u2del, 0); 3700 3701 temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird); 3702 xhci_writel(xhci, temp, pm_addr); 3703 3704 /* Set port link state to U2(L1) */ 3705 addr = port_array[port_num]; 3706 xhci_set_link_state(xhci, port_array, port_num, XDEV_U2); 3707 3708 /* wait for ACK */ 3709 spin_unlock_irqrestore(&xhci->lock, flags); 3710 msleep(10); 3711 spin_lock_irqsave(&xhci->lock, flags); 3712 3713 /* Check L1 Status */ 3714 ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125); 3715 if (ret != -ETIMEDOUT) { 3716 /* enter L1 successfully */ 3717 temp = xhci_readl(xhci, addr); 3718 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n", 3719 port_num, temp); 3720 ret = 0; 3721 } else { 3722 temp = xhci_readl(xhci, pm_addr); 3723 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n", 3724 port_num, temp & PORT_L1S_MASK); 3725 ret = -EINVAL; 3726 } 3727 3728 /* Resume the port */ 3729 xhci_set_link_state(xhci, port_array, port_num, XDEV_U0); 3730 3731 spin_unlock_irqrestore(&xhci->lock, flags); 3732 msleep(10); 3733 spin_lock_irqsave(&xhci->lock, flags); 3734 3735 /* Clear PLC */ 3736 xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC); 3737 3738 /* Check PORTSC to make sure the device is in the right state */ 3739 if (!ret) { 3740 temp = xhci_readl(xhci, addr); 3741 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp); 3742 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) || 3743 (temp & PORT_PLS_MASK) != XDEV_U0) { 3744 xhci_dbg(xhci, "port L1 resume fail\n"); 3745 ret = -EINVAL; 3746 } 3747 } 3748 3749 if (ret) { 3750 /* Insert dev to lpm_failed_devs list */ 3751 xhci_warn(xhci, "device LPM test failed, may disconnect and " 3752 "re-enumerate\n"); 3753 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC); 3754 if (!dev_info) { 3755 ret = -ENOMEM; 3756 goto finish; 3757 } 3758 dev_info->dev_id = dev_id; 3759 INIT_LIST_HEAD(&dev_info->list); 3760 list_add(&dev_info->list, &xhci->lpm_failed_devs); 3761 } else { 3762 xhci_ring_device(xhci, udev->slot_id); 3763 } 3764 3765finish: 3766 spin_unlock_irqrestore(&xhci->lock, flags); 3767 return ret; 3768} 3769 3770int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 3771 struct usb_device *udev, int enable) 3772{ 3773 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3774 __le32 __iomem **port_array; 3775 __le32 __iomem *pm_addr; 3776 u32 temp; 3777 unsigned int port_num; 3778 unsigned long flags; 3779 int u2del, hird; 3780 3781 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support || 3782 !udev->lpm_capable) 3783 return -EPERM; 3784 3785 if (!udev->parent || udev->parent->parent || 3786 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 3787 return -EPERM; 3788 3789 if (udev->usb2_hw_lpm_capable != 1) 3790 return -EPERM; 3791 3792 spin_lock_irqsave(&xhci->lock, flags); 3793 3794 port_array = xhci->usb2_ports; 3795 port_num = udev->portnum - 1; 3796 pm_addr = port_array[port_num] + 1; 3797 temp = xhci_readl(xhci, pm_addr); 3798 3799 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", 3800 enable ? 
"enable" : "disable", port_num); 3801 3802 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 3803 if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2)) 3804 hird = xhci_calculate_hird_besl(u2del, 1); 3805 else 3806 hird = xhci_calculate_hird_besl(u2del, 0); 3807 3808 if (enable) { 3809 temp &= ~PORT_HIRD_MASK; 3810 temp |= PORT_HIRD(hird) | PORT_RWE; 3811 xhci_writel(xhci, temp, pm_addr); 3812 temp = xhci_readl(xhci, pm_addr); 3813 temp |= PORT_HLE; 3814 xhci_writel(xhci, temp, pm_addr); 3815 } else { 3816 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK); 3817 xhci_writel(xhci, temp, pm_addr); 3818 } 3819 3820 spin_unlock_irqrestore(&xhci->lock, flags); 3821 return 0; 3822} 3823 3824int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 3825{ 3826 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3827 int ret; 3828 3829 ret = xhci_usb2_software_lpm_test(hcd, udev); 3830 if (!ret) { 3831 xhci_dbg(xhci, "software LPM test succeed\n"); 3832 if (xhci->hw_lpm_support == 1) { 3833 udev->usb2_hw_lpm_capable = 1; 3834 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1); 3835 if (!ret) 3836 udev->usb2_hw_lpm_enabled = 1; 3837 } 3838 } 3839 3840 return 0; 3841} 3842 3843#else 3844 3845int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 3846 struct usb_device *udev, int enable) 3847{ 3848 return 0; 3849} 3850 3851int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 3852{ 3853 return 0; 3854} 3855 3856#endif /* CONFIG_USB_SUSPEND */ 3857 3858/* Once a hub descriptor is fetched for a device, we need to update the xHC's 3859 * internal data structures for the device. 3860 */ 3861int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 3862 struct usb_tt *tt, gfp_t mem_flags) 3863{ 3864 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3865 struct xhci_virt_device *vdev; 3866 struct xhci_command *config_cmd; 3867 struct xhci_input_control_ctx *ctrl_ctx; 3868 struct xhci_slot_ctx *slot_ctx; 3869 unsigned long flags; 3870 unsigned think_time; 3871 int ret; 3872 3873 /* Ignore root hubs */ 3874 if (!hdev->parent) 3875 return 0; 3876 3877 vdev = xhci->devs[hdev->slot_id]; 3878 if (!vdev) { 3879 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 3880 return -EINVAL; 3881 } 3882 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 3883 if (!config_cmd) { 3884 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 3885 return -ENOMEM; 3886 } 3887 3888 spin_lock_irqsave(&xhci->lock, flags); 3889 if (hdev->speed == USB_SPEED_HIGH && 3890 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { 3891 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); 3892 xhci_free_command(xhci, config_cmd); 3893 spin_unlock_irqrestore(&xhci->lock, flags); 3894 return -ENOMEM; 3895 } 3896 3897 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 3898 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); 3899 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 3900 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 3901 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 3902 if (tt->multi) 3903 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 3904 if (xhci->hci_version > 0x95) { 3905 xhci_dbg(xhci, "xHCI version %x needs hub " 3906 "TT think time and number of ports\n", 3907 (unsigned int) xhci->hci_version); 3908 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); 3909 /* Set TT think time - convert from ns to FS bit times. 3910 * 0 = 8 FS bit times, 1 = 16 FS bit times, 3911 * 2 = 24 FS bit times, 3 = 32 FS bit times. 
3912 *
3913 * xHCI 1.0: this field shall be 0 if the device is not a
3914 * High-speed hub.
3915 */
3916 think_time = tt->think_time;
3917 if (think_time != 0)
3918 think_time = (think_time / 666) - 1;
3919 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
3920 slot_ctx->tt_info |=
3921 cpu_to_le32(TT_THINK_TIME(think_time));
3922 } else {
3923 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
3924 "TT think time or number of ports\n",
3925 (unsigned int) xhci->hci_version);
3926 }
3927 slot_ctx->dev_state = 0;
3928 spin_unlock_irqrestore(&xhci->lock, flags);
3929
3930 xhci_dbg(xhci, "Set up %s for hub device.\n",
3931 (xhci->hci_version > 0x95) ?
3932 "configure endpoint" : "evaluate context");
3933 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
3934 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
3935
3936 /* Issue and wait for the configure endpoint or
3937 * evaluate context command.
3938 */
3939 if (xhci->hci_version > 0x95)
3940 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
3941 false, false);
3942 else
3943 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
3944 true, false);
3945
3946 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
3947 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
3948
3949 xhci_free_command(xhci, config_cmd);
3950 return ret;
3951}
3952
3953int xhci_get_frame(struct usb_hcd *hcd)
3954{
3955 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3956 /* EHCI mods by the periodic size. Why? */
3957 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
3958}
3959
3960int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
3961{
3962 struct xhci_hcd *xhci;
3963 struct device *dev = hcd->self.controller;
3964 int retval;
3965 u32 temp;
3966
3967 hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
3968
3969 if (usb_hcd_is_primary_hcd(hcd)) {
3970 xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
3971 if (!xhci)
3972 return -ENOMEM;
3973 *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
3974 xhci->main_hcd = hcd;
3975 /* Mark the first roothub as being USB 2.0.
3976 * The xHCI driver will register the USB 3.0 roothub.
3977 */
3978 hcd->speed = HCD_USB2;
3979 hcd->self.root_hub->speed = USB_SPEED_HIGH;
3980 /*
3981 * The USB 2.0 roothub under xHCI has an integrated TT
3982 * (rate matching hub), as opposed to having an OHCI/UHCI
3983 * companion controller.
3984 */
3985 hcd->has_tt = 1;
3986 } else {
3987 /* xHCI private pointer was set in xhci_pci_probe for the second
3988 * registered roothub.
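 *
 * In other words, both hcds are assumed to carry the same pointer in
 * their hcd_priv area: the primary branch above stores it explicitly,
 * and xhci_pci_probe is assumed to copy it into the USB 3.0 hcd before
 * that hcd is added.  Either way hcd_to_xhci() resolves to the one
 * shared structure:
 *
 *	USB 2.0 hcd: hcd_priv holds xhci ---+
 *	                                    +--> single struct xhci_hcd
 *	USB 3.0 hcd: hcd_priv holds xhci ---+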
3989 */ 3990 xhci = hcd_to_xhci(hcd); 3991 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 3992 if (HCC_64BIT_ADDR(temp)) { 3993 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 3994 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); 3995 } else { 3996 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); 3997 } 3998 return 0; 3999 } 4000 4001 xhci->cap_regs = hcd->regs; 4002 xhci->op_regs = hcd->regs + 4003 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); 4004 xhci->run_regs = hcd->regs + 4005 (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK); 4006 /* Cache read-only capability registers */ 4007 xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1); 4008 xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2); 4009 xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3); 4010 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); 4011 xhci->hci_version = HC_VERSION(xhci->hcc_params); 4012 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 4013 xhci_print_registers(xhci); 4014 4015 get_quirks(dev, xhci); 4016 4017 /* Make sure the HC is halted. */ 4018 retval = xhci_halt(xhci); 4019 if (retval) 4020 goto error; 4021 4022 xhci_dbg(xhci, "Resetting HCD\n"); 4023 /* Reset the internal HC memory state and registers. */ 4024 retval = xhci_reset(xhci); 4025 if (retval) 4026 goto error; 4027 xhci_dbg(xhci, "Reset complete\n"); 4028 4029 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 4030 if (HCC_64BIT_ADDR(temp)) { 4031 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 4032 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); 4033 } else { 4034 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); 4035 } 4036 4037 xhci_dbg(xhci, "Calling HCD init\n"); 4038 /* Initialize HCD and host controller data structures. */ 4039 retval = xhci_init(hcd); 4040 if (retval) 4041 goto error; 4042 xhci_dbg(xhci, "Called HCD init\n"); 4043 return 0; 4044error: 4045 kfree(xhci); 4046 return retval; 4047} 4048 4049MODULE_DESCRIPTION(DRIVER_DESC); 4050MODULE_AUTHOR(DRIVER_AUTHOR); 4051MODULE_LICENSE("GPL"); 4052 4053static int __init xhci_hcd_init(void) 4054{ 4055 int retval; 4056 4057 retval = xhci_register_pci(); 4058 if (retval < 0) { 4059 printk(KERN_DEBUG "Problem registering PCI driver."); 4060 return retval; 4061 } 4062 /* 4063 * Check the compiler generated sizes of structures that must be laid 4064 * out in specific ways for hardware access. 4065 */ 4066 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); 4067 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); 4068 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); 4069 /* xhci_device_control has eight fields, and also 4070 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 4071 */ 4072 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 4073 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 4074 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); 4075 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); 4076 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); 4077 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ 4078 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); 4079 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); 4080 return 0; 4081} 4082module_init(xhci_hcd_init); 4083 4084static void __exit xhci_hcd_cleanup(void) 4085{ 4086 xhci_unregister_pci(); 4087} 4088module_exit(xhci_hcd_cleanup); 4089