xhci-mem.c revision d18240db797ed749b511b8dc910c5dcf08be46d6
1/* 2 * xHCI host controller driver 3 * 4 * Copyright (C) 2008 Intel Corp. 5 * 6 * Author: Sarah Sharp 7 * Some code borrowed from the Linux EHCI driver. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16 * for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software Foundation, 20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23#include <linux/usb.h> 24#include <linux/pci.h> 25#include <linux/slab.h> 26#include <linux/dmapool.h> 27 28#include "xhci.h" 29 30/* 31 * Allocates a generic ring segment from the ring pool, sets the dma address, 32 * initializes the segment to zero, and sets the private next pointer to NULL. 33 * 34 * Section 4.11.1.1: 35 * "All components of all Command and Transfer TRBs shall be initialized to '0'" 36 */ 37static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags) 38{ 39 struct xhci_segment *seg; 40 dma_addr_t dma; 41 42 seg = kzalloc(sizeof *seg, flags); 43 if (!seg) 44 return NULL; 45 xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg); 46 47 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); 48 if (!seg->trbs) { 49 kfree(seg); 50 return NULL; 51 } 52 xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n", 53 seg->trbs, (unsigned long long)dma); 54 55 memset(seg->trbs, 0, SEGMENT_SIZE); 56 seg->dma = dma; 57 seg->next = NULL; 58 59 return seg; 60} 61 62static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) 63{ 64 if (!seg) 65 return; 66 if (seg->trbs) { 67 xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n", 68 seg->trbs, (unsigned long long)seg->dma); 69 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); 70 seg->trbs = NULL; 71 } 72 xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg); 73 kfree(seg); 74} 75 76/* 77 * Make the prev segment point to the next segment. 78 * 79 * Change the last TRB in the prev segment to be a Link TRB which points to the 80 * DMA address of the next segment. The caller needs to set any Link TRB 81 * related flags, such as End TRB, Toggle Cycle, and no snoop. 82 */ 83static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, 84 struct xhci_segment *next, bool link_trbs) 85{ 86 u32 val; 87 88 if (!prev || !next) 89 return; 90 prev->next = next; 91 if (link_trbs) { 92 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; 93 94 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ 95 val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; 96 val &= ~TRB_TYPE_BITMASK; 97 val |= TRB_TYPE(TRB_LINK); 98 /* Always set the chain bit with 0.95 hardware */ 99 if (xhci_link_trb_quirk(xhci)) 100 val |= TRB_CHAIN; 101 prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; 102 } 103 xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", 104 (unsigned long long)prev->dma, 105 (unsigned long long)next->dma); 106} 107 108/* XXX: Do we need the hcd structure in all these functions? */ 109void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) 110{ 111 struct xhci_segment *seg; 112 struct xhci_segment *first_seg; 113 114 if (!ring || !ring->first_seg) 115 return; 116 first_seg = ring->first_seg; 117 seg = first_seg->next; 118 xhci_dbg(xhci, "Freeing ring at %p\n", ring); 119 while (seg != first_seg) { 120 struct xhci_segment *next = seg->next; 121 xhci_segment_free(xhci, seg); 122 seg = next; 123 } 124 xhci_segment_free(xhci, first_seg); 125 ring->first_seg = NULL; 126 kfree(ring); 127} 128 129static void xhci_initialize_ring_info(struct xhci_ring *ring) 130{ 131 /* The ring is empty, so the enqueue pointer == dequeue pointer */ 132 ring->enqueue = ring->first_seg->trbs; 133 ring->enq_seg = ring->first_seg; 134 ring->dequeue = ring->enqueue; 135 ring->deq_seg = ring->first_seg; 136 /* The ring is initialized to 0. The producer must write 1 to the cycle 137 * bit to handover ownership of the TRB, so PCS = 1. The consumer must 138 * compare CCS to the cycle bit to check ownership, so CCS = 1. 139 */ 140 ring->cycle_state = 1; 141 /* Not necessary for new rings, but needed for re-initialized rings */ 142 ring->enq_updates = 0; 143 ring->deq_updates = 0; 144} 145 146/** 147 * Create a new ring with zero or more segments. 148 * 149 * Link each segment together into a ring. 150 * Set the end flag and the cycle toggle bit on the last segment. 151 * See section 4.9.1 and figures 15 and 16. 152 */ 153static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, 154 unsigned int num_segs, bool link_trbs, gfp_t flags) 155{ 156 struct xhci_ring *ring; 157 struct xhci_segment *prev; 158 159 ring = kzalloc(sizeof *(ring), flags); 160 xhci_dbg(xhci, "Allocating ring at %p\n", ring); 161 if (!ring) 162 return NULL; 163 164 INIT_LIST_HEAD(&ring->td_list); 165 if (num_segs == 0) 166 return ring; 167 168 ring->first_seg = xhci_segment_alloc(xhci, flags); 169 if (!ring->first_seg) 170 goto fail; 171 num_segs--; 172 173 prev = ring->first_seg; 174 while (num_segs > 0) { 175 struct xhci_segment *next; 176 177 next = xhci_segment_alloc(xhci, flags); 178 if (!next) 179 goto fail; 180 xhci_link_segments(xhci, prev, next, link_trbs); 181 182 prev = next; 183 num_segs--; 184 } 185 xhci_link_segments(xhci, prev, ring->first_seg, link_trbs); 186 187 if (link_trbs) { 188 /* See section 4.9.2.1 and 6.4.4.1 */ 189 prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); 190 xhci_dbg(xhci, "Wrote link toggle flag to" 191 " segment %p (virtual), 0x%llx (DMA)\n", 192 prev, (unsigned long long)prev->dma); 193 } 194 xhci_initialize_ring_info(ring); 195 return ring; 196 197fail: 198 xhci_ring_free(xhci, ring); 199 return NULL; 200} 201 202void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, 203 struct xhci_virt_device *virt_dev, 204 unsigned int ep_index) 205{ 206 int rings_cached; 207 208 rings_cached = virt_dev->num_rings_cached; 209 if (rings_cached < XHCI_MAX_RINGS_CACHED) { 210 virt_dev->num_rings_cached++; 211 rings_cached = virt_dev->num_rings_cached; 212 virt_dev->ring_cache[rings_cached] = 213 virt_dev->eps[ep_index].ring; 214 xhci_dbg(xhci, "Cached old ring, " 215 "%d ring%s cached\n", 216 rings_cached, 217 (rings_cached > 1) ? "s" : ""); 218 } else { 219 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); 220 xhci_dbg(xhci, "Ring cache full (%d rings), " 221 "freeing ring\n", 222 virt_dev->num_rings_cached); 223 } 224 virt_dev->eps[ep_index].ring = NULL; 225} 226 227/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue 228 * pointers to the beginning of the ring. 229 */ 230static void xhci_reinit_cached_ring(struct xhci_hcd *xhci, 231 struct xhci_ring *ring) 232{ 233 struct xhci_segment *seg = ring->first_seg; 234 do { 235 memset(seg->trbs, 0, 236 sizeof(union xhci_trb)*TRBS_PER_SEGMENT); 237 /* All endpoint rings have link TRBs */ 238 xhci_link_segments(xhci, seg, seg->next, 1); 239 seg = seg->next; 240 } while (seg != ring->first_seg); 241 xhci_initialize_ring_info(ring); 242 /* td list should be empty since all URBs have been cancelled, 243 * but just in case... 244 */ 245 INIT_LIST_HEAD(&ring->td_list); 246} 247 248#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 249 250static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 251 int type, gfp_t flags) 252{ 253 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); 254 if (!ctx) 255 return NULL; 256 257 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); 258 ctx->type = type; 259 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; 260 if (type == XHCI_CTX_TYPE_INPUT) 261 ctx->size += CTX_SIZE(xhci->hcc_params); 262 263 ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); 264 memset(ctx->bytes, 0, ctx->size); 265 return ctx; 266} 267 268static void xhci_free_container_ctx(struct xhci_hcd *xhci, 269 struct xhci_container_ctx *ctx) 270{ 271 if (!ctx) 272 return; 273 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); 274 kfree(ctx); 275} 276 277struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, 278 struct xhci_container_ctx *ctx) 279{ 280 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); 281 return (struct xhci_input_control_ctx *)ctx->bytes; 282} 283 284struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, 285 struct xhci_container_ctx *ctx) 286{ 287 if (ctx->type == XHCI_CTX_TYPE_DEVICE) 288 return (struct xhci_slot_ctx *)ctx->bytes; 289 290 return (struct xhci_slot_ctx *) 291 (ctx->bytes + CTX_SIZE(xhci->hcc_params)); 292} 293 294struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, 295 struct xhci_container_ctx *ctx, 296 unsigned int ep_index) 297{ 298 /* increment ep index by offset of start of ep ctx array */ 299 ep_index++; 300 if (ctx->type == XHCI_CTX_TYPE_INPUT) 301 ep_index++; 302 303 return (struct xhci_ep_ctx *) 304 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 305} 306 307 308/***************** Streams structures manipulation *************************/ 309 310void xhci_free_stream_ctx(struct xhci_hcd *xhci, 311 unsigned int num_stream_ctxs, 312 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) 313{ 314 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 315 316 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) 317 pci_free_consistent(pdev, 318 sizeof(struct xhci_stream_ctx)*num_stream_ctxs, 319 stream_ctx, dma); 320 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) 321 return dma_pool_free(xhci->small_streams_pool, 322 stream_ctx, dma); 323 else 324 return dma_pool_free(xhci->medium_streams_pool, 325 stream_ctx, dma); 326} 327 328/* 329 * The stream context array for each endpoint with bulk streams enabled can 330 * vary in size, based on: 331 * - how many streams the endpoint supports, 332 * - the maximum primary stream array size the host controller supports, 333 * - and how many streams the device driver asks for. 334 * 335 * The stream context array must be a power of 2, and can be as small as 336 * 64 bytes or as large as 1MB. 337 */ 338struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, 339 unsigned int num_stream_ctxs, dma_addr_t *dma, 340 gfp_t mem_flags) 341{ 342 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 343 344 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) 345 return pci_alloc_consistent(pdev, 346 sizeof(struct xhci_stream_ctx)*num_stream_ctxs, 347 dma); 348 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) 349 return dma_pool_alloc(xhci->small_streams_pool, 350 mem_flags, dma); 351 else 352 return dma_pool_alloc(xhci->medium_streams_pool, 353 mem_flags, dma); 354} 355 356struct xhci_ring *xhci_dma_to_transfer_ring( 357 struct xhci_virt_ep *ep, 358 u64 address) 359{ 360 if (ep->ep_state & EP_HAS_STREAMS) 361 return radix_tree_lookup(&ep->stream_info->trb_address_map, 362 address >> SEGMENT_SHIFT); 363 return ep->ring; 364} 365 366/* Only use this when you know stream_info is valid */ 367#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 368static struct xhci_ring *dma_to_stream_ring( 369 struct xhci_stream_info *stream_info, 370 u64 address) 371{ 372 return radix_tree_lookup(&stream_info->trb_address_map, 373 address >> SEGMENT_SHIFT); 374} 375#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ 376 377struct xhci_ring *xhci_stream_id_to_ring( 378 struct xhci_virt_device *dev, 379 unsigned int ep_index, 380 unsigned int stream_id) 381{ 382 struct xhci_virt_ep *ep = &dev->eps[ep_index]; 383 384 if (stream_id == 0) 385 return ep->ring; 386 if (!ep->stream_info) 387 return NULL; 388 389 if (stream_id > ep->stream_info->num_streams) 390 return NULL; 391 return ep->stream_info->stream_rings[stream_id]; 392} 393 394struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, 395 unsigned int slot_id, unsigned int ep_index, 396 unsigned int stream_id) 397{ 398 struct xhci_virt_ep *ep; 399 400 ep = &xhci->devs[slot_id]->eps[ep_index]; 401 /* Common case: no streams */ 402 if (!(ep->ep_state & EP_HAS_STREAMS)) 403 return ep->ring; 404 405 if (stream_id == 0) { 406 xhci_warn(xhci, 407 "WARN: Slot ID %u, ep index %u has streams, " 408 "but URB has no stream ID.\n", 409 slot_id, ep_index); 410 return NULL; 411 } 412 413 if (stream_id < ep->stream_info->num_streams) 414 return ep->stream_info->stream_rings[stream_id]; 415 416 xhci_warn(xhci, 417 "WARN: Slot ID %u, ep index %u has " 418 "stream IDs 1 to %u allocated, " 419 "but stream ID %u is requested.\n", 420 slot_id, ep_index, 421 ep->stream_info->num_streams - 1, 422 stream_id); 423 return NULL; 424} 425 426/* Get the right ring for the given URB. 427 * If the endpoint supports streams, boundary check the URB's stream ID. 428 * If the endpoint doesn't support streams, return the singular endpoint ring. 429 */ 430struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, 431 struct urb *urb) 432{ 433 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id, 434 xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id); 435} 436 437#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 438static int xhci_test_radix_tree(struct xhci_hcd *xhci, 439 unsigned int num_streams, 440 struct xhci_stream_info *stream_info) 441{ 442 u32 cur_stream; 443 struct xhci_ring *cur_ring; 444 u64 addr; 445 446 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 447 struct xhci_ring *mapped_ring; 448 int trb_size = sizeof(union xhci_trb); 449 450 cur_ring = stream_info->stream_rings[cur_stream]; 451 for (addr = cur_ring->first_seg->dma; 452 addr < cur_ring->first_seg->dma + SEGMENT_SIZE; 453 addr += trb_size) { 454 mapped_ring = dma_to_stream_ring(stream_info, addr); 455 if (cur_ring != mapped_ring) { 456 xhci_warn(xhci, "WARN: DMA address 0x%08llx " 457 "didn't map to stream ID %u; " 458 "mapped to ring %p\n", 459 (unsigned long long) addr, 460 cur_stream, 461 mapped_ring); 462 return -EINVAL; 463 } 464 } 465 /* One TRB after the end of the ring segment shouldn't return a 466 * pointer to the current ring (although it may be a part of a 467 * different ring). 468 */ 469 mapped_ring = dma_to_stream_ring(stream_info, addr); 470 if (mapped_ring != cur_ring) { 471 /* One TRB before should also fail */ 472 addr = cur_ring->first_seg->dma - trb_size; 473 mapped_ring = dma_to_stream_ring(stream_info, addr); 474 } 475 if (mapped_ring == cur_ring) { 476 xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx " 477 "mapped to valid stream ID %u; " 478 "mapped ring = %p\n", 479 (unsigned long long) addr, 480 cur_stream, 481 mapped_ring); 482 return -EINVAL; 483 } 484 } 485 return 0; 486} 487#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ 488 489/* 490 * Change an endpoint's internal structure so it supports stream IDs. The 491 * number of requested streams includes stream 0, which cannot be used by device 492 * drivers. 493 * 494 * The number of stream contexts in the stream context array may be bigger than 495 * the number of streams the driver wants to use. This is because the number of 496 * stream context array entries must be a power of two. 497 * 498 * We need a radix tree for mapping physical addresses of TRBs to which stream 499 * ID they belong to. We need to do this because the host controller won't tell 500 * us which stream ring the TRB came from. We could store the stream ID in an 501 * event data TRB, but that doesn't help us for the cancellation case, since the 502 * endpoint may stop before it reaches that event data TRB. 503 * 504 * The radix tree maps the upper portion of the TRB DMA address to a ring 505 * segment that has the same upper portion of DMA addresses. For example, say I 506 * have segments of size 1KB, that are always 64-byte aligned. A segment may 507 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the 508 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to 509 * pass the radix tree a key to get the right stream ID: 510 * 511 * 0x10c90fff >> 10 = 0x43243 512 * 0x10c912c0 >> 10 = 0x43244 513 * 0x10c91400 >> 10 = 0x43245 514 * 515 * Obviously, only those TRBs with DMA addresses that are within the segment 516 * will make the radix tree return the stream ID for that ring. 517 * 518 * Caveats for the radix tree: 519 * 520 * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an 521 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be 522 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the 523 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit 524 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit 525 * extended systems (where the DMA address can be bigger than 32-bits), 526 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. 527 */ 528struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, 529 unsigned int num_stream_ctxs, 530 unsigned int num_streams, gfp_t mem_flags) 531{ 532 struct xhci_stream_info *stream_info; 533 u32 cur_stream; 534 struct xhci_ring *cur_ring; 535 unsigned long key; 536 u64 addr; 537 int ret; 538 539 xhci_dbg(xhci, "Allocating %u streams and %u " 540 "stream context array entries.\n", 541 num_streams, num_stream_ctxs); 542 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { 543 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); 544 return NULL; 545 } 546 xhci->cmd_ring_reserved_trbs++; 547 548 stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags); 549 if (!stream_info) 550 goto cleanup_trbs; 551 552 stream_info->num_streams = num_streams; 553 stream_info->num_stream_ctxs = num_stream_ctxs; 554 555 /* Initialize the array of virtual pointers to stream rings. */ 556 stream_info->stream_rings = kzalloc( 557 sizeof(struct xhci_ring *)*num_streams, 558 mem_flags); 559 if (!stream_info->stream_rings) 560 goto cleanup_info; 561 562 /* Initialize the array of DMA addresses for stream rings for the HW. */ 563 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci, 564 num_stream_ctxs, &stream_info->ctx_array_dma, 565 mem_flags); 566 if (!stream_info->stream_ctx_array) 567 goto cleanup_ctx; 568 memset(stream_info->stream_ctx_array, 0, 569 sizeof(struct xhci_stream_ctx)*num_stream_ctxs); 570 571 /* Allocate everything needed to free the stream rings later */ 572 stream_info->free_streams_command = 573 xhci_alloc_command(xhci, true, true, mem_flags); 574 if (!stream_info->free_streams_command) 575 goto cleanup_ctx; 576 577 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); 578 579 /* Allocate rings for all the streams that the driver will use, 580 * and add their segment DMA addresses to the radix tree. 581 * Stream 0 is reserved. 582 */ 583 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 584 stream_info->stream_rings[cur_stream] = 585 xhci_ring_alloc(xhci, 1, true, mem_flags); 586 cur_ring = stream_info->stream_rings[cur_stream]; 587 if (!cur_ring) 588 goto cleanup_rings; 589 cur_ring->stream_id = cur_stream; 590 /* Set deq ptr, cycle bit, and stream context type */ 591 addr = cur_ring->first_seg->dma | 592 SCT_FOR_CTX(SCT_PRI_TR) | 593 cur_ring->cycle_state; 594 stream_info->stream_ctx_array[cur_stream].stream_ring = addr; 595 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", 596 cur_stream, (unsigned long long) addr); 597 598 key = (unsigned long) 599 (cur_ring->first_seg->dma >> SEGMENT_SHIFT); 600 ret = radix_tree_insert(&stream_info->trb_address_map, 601 key, cur_ring); 602 if (ret) { 603 xhci_ring_free(xhci, cur_ring); 604 stream_info->stream_rings[cur_stream] = NULL; 605 goto cleanup_rings; 606 } 607 } 608 /* Leave the other unused stream ring pointers in the stream context 609 * array initialized to zero. This will cause the xHC to give us an 610 * error if the device asks for a stream ID we don't have setup (if it 611 * was any other way, the host controller would assume the ring is 612 * "empty" and wait forever for data to be queued to that stream ID). 613 */ 614#if XHCI_DEBUG 615 /* Do a little test on the radix tree to make sure it returns the 616 * correct values. 617 */ 618 if (xhci_test_radix_tree(xhci, num_streams, stream_info)) 619 goto cleanup_rings; 620#endif 621 622 return stream_info; 623 624cleanup_rings: 625 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 626 cur_ring = stream_info->stream_rings[cur_stream]; 627 if (cur_ring) { 628 addr = cur_ring->first_seg->dma; 629 radix_tree_delete(&stream_info->trb_address_map, 630 addr >> SEGMENT_SHIFT); 631 xhci_ring_free(xhci, cur_ring); 632 stream_info->stream_rings[cur_stream] = NULL; 633 } 634 } 635 xhci_free_command(xhci, stream_info->free_streams_command); 636cleanup_ctx: 637 kfree(stream_info->stream_rings); 638cleanup_info: 639 kfree(stream_info); 640cleanup_trbs: 641 xhci->cmd_ring_reserved_trbs--; 642 return NULL; 643} 644/* 645 * Sets the MaxPStreams field and the Linear Stream Array field. 646 * Sets the dequeue pointer to the stream context array. 647 */ 648void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, 649 struct xhci_ep_ctx *ep_ctx, 650 struct xhci_stream_info *stream_info) 651{ 652 u32 max_primary_streams; 653 /* MaxPStreams is the number of stream context array entries, not the 654 * number we're actually using. Must be in 2^(MaxPstreams + 1) format. 655 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. 656 */ 657 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; 658 xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", 659 1 << (max_primary_streams + 1)); 660 ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK; 661 ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams); 662 ep_ctx->ep_info |= EP_HAS_LSA; 663 ep_ctx->deq = stream_info->ctx_array_dma; 664} 665 666/* 667 * Sets the MaxPStreams field and the Linear Stream Array field to 0. 668 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark, 669 * not at the beginning of the ring). 670 */ 671void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, 672 struct xhci_ep_ctx *ep_ctx, 673 struct xhci_virt_ep *ep) 674{ 675 dma_addr_t addr; 676 ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK; 677 ep_ctx->ep_info &= ~EP_HAS_LSA; 678 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); 679 ep_ctx->deq = addr | ep->ring->cycle_state; 680} 681 682/* Frees all stream contexts associated with the endpoint, 683 * 684 * Caller should fix the endpoint context streams fields. 685 */ 686void xhci_free_stream_info(struct xhci_hcd *xhci, 687 struct xhci_stream_info *stream_info) 688{ 689 int cur_stream; 690 struct xhci_ring *cur_ring; 691 dma_addr_t addr; 692 693 if (!stream_info) 694 return; 695 696 for (cur_stream = 1; cur_stream < stream_info->num_streams; 697 cur_stream++) { 698 cur_ring = stream_info->stream_rings[cur_stream]; 699 if (cur_ring) { 700 addr = cur_ring->first_seg->dma; 701 radix_tree_delete(&stream_info->trb_address_map, 702 addr >> SEGMENT_SHIFT); 703 xhci_ring_free(xhci, cur_ring); 704 stream_info->stream_rings[cur_stream] = NULL; 705 } 706 } 707 xhci_free_command(xhci, stream_info->free_streams_command); 708 xhci->cmd_ring_reserved_trbs--; 709 if (stream_info->stream_ctx_array) 710 xhci_free_stream_ctx(xhci, 711 stream_info->num_stream_ctxs, 712 stream_info->stream_ctx_array, 713 stream_info->ctx_array_dma); 714 715 if (stream_info) 716 kfree(stream_info->stream_rings); 717 kfree(stream_info); 718} 719 720 721/***************** Device context manipulation *************************/ 722 723static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, 724 struct xhci_virt_ep *ep) 725{ 726 init_timer(&ep->stop_cmd_timer); 727 ep->stop_cmd_timer.data = (unsigned long) ep; 728 ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog; 729 ep->xhci = xhci; 730} 731 732/* All the xhci_tds in the ring's TD list should be freed at this point */ 733void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 734{ 735 struct xhci_virt_device *dev; 736 int i; 737 738 /* Slot ID 0 is reserved */ 739 if (slot_id == 0 || !xhci->devs[slot_id]) 740 return; 741 742 dev = xhci->devs[slot_id]; 743 xhci->dcbaa->dev_context_ptrs[slot_id] = 0; 744 if (!dev) 745 return; 746 747 for (i = 0; i < 31; ++i) { 748 if (dev->eps[i].ring) 749 xhci_ring_free(xhci, dev->eps[i].ring); 750 if (dev->eps[i].stream_info) 751 xhci_free_stream_info(xhci, 752 dev->eps[i].stream_info); 753 } 754 755 if (dev->ring_cache) { 756 for (i = 0; i < dev->num_rings_cached; i++) 757 xhci_ring_free(xhci, dev->ring_cache[i]); 758 kfree(dev->ring_cache); 759 } 760 761 if (dev->in_ctx) 762 xhci_free_container_ctx(xhci, dev->in_ctx); 763 if (dev->out_ctx) 764 xhci_free_container_ctx(xhci, dev->out_ctx); 765 766 kfree(xhci->devs[slot_id]); 767 xhci->devs[slot_id] = NULL; 768} 769 770int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 771 struct usb_device *udev, gfp_t flags) 772{ 773 struct xhci_virt_device *dev; 774 int i; 775 776 /* Slot ID 0 is reserved */ 777 if (slot_id == 0 || xhci->devs[slot_id]) { 778 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); 779 return 0; 780 } 781 782 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); 783 if (!xhci->devs[slot_id]) 784 return 0; 785 dev = xhci->devs[slot_id]; 786 787 /* Allocate the (output) device context that will be used in the HC. */ 788 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 789 if (!dev->out_ctx) 790 goto fail; 791 792 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, 793 (unsigned long long)dev->out_ctx->dma); 794 795 /* Allocate the (input) device context for address device command */ 796 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); 797 if (!dev->in_ctx) 798 goto fail; 799 800 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 801 (unsigned long long)dev->in_ctx->dma); 802 803 /* Initialize the cancellation list and watchdog timers for each ep */ 804 for (i = 0; i < 31; i++) { 805 xhci_init_endpoint_timer(xhci, &dev->eps[i]); 806 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); 807 } 808 809 /* Allocate endpoint 0 ring */ 810 dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags); 811 if (!dev->eps[0].ring) 812 goto fail; 813 814 /* Allocate pointers to the ring cache */ 815 dev->ring_cache = kzalloc( 816 sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED, 817 flags); 818 if (!dev->ring_cache) 819 goto fail; 820 dev->num_rings_cached = 0; 821 822 init_completion(&dev->cmd_completion); 823 INIT_LIST_HEAD(&dev->cmd_list); 824 825 /* Point to output device context in dcbaa. */ 826 xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; 827 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 828 slot_id, 829 &xhci->dcbaa->dev_context_ptrs[slot_id], 830 (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); 831 832 return 1; 833fail: 834 xhci_free_virt_device(xhci, slot_id); 835 return 0; 836} 837 838void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, 839 struct usb_device *udev) 840{ 841 struct xhci_virt_device *virt_dev; 842 struct xhci_ep_ctx *ep0_ctx; 843 struct xhci_ring *ep_ring; 844 845 virt_dev = xhci->devs[udev->slot_id]; 846 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0); 847 ep_ring = virt_dev->eps[0].ring; 848 /* 849 * FIXME we don't keep track of the dequeue pointer very well after a 850 * Set TR dequeue pointer, so we're setting the dequeue pointer of the 851 * host to our enqueue pointer. This should only be called after a 852 * configured device has reset, so all control transfers should have 853 * been completed or cancelled before the reset. 854 */ 855 ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue); 856 ep0_ctx->deq |= ep_ring->cycle_state; 857} 858 859/* Setup an xHCI virtual device for a Set Address command */ 860int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) 861{ 862 struct xhci_virt_device *dev; 863 struct xhci_ep_ctx *ep0_ctx; 864 struct usb_device *top_dev; 865 struct xhci_slot_ctx *slot_ctx; 866 struct xhci_input_control_ctx *ctrl_ctx; 867 868 dev = xhci->devs[udev->slot_id]; 869 /* Slot ID 0 is reserved */ 870 if (udev->slot_id == 0 || !dev) { 871 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", 872 udev->slot_id); 873 return -EINVAL; 874 } 875 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 876 ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); 877 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 878 879 /* 2) New slot context and endpoint 0 context are valid*/ 880 ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; 881 882 /* 3) Only the control endpoint is valid - one endpoint context */ 883 slot_ctx->dev_info |= LAST_CTX(1); 884 885 slot_ctx->dev_info |= (u32) udev->route; 886 switch (udev->speed) { 887 case USB_SPEED_SUPER: 888 slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; 889 break; 890 case USB_SPEED_HIGH: 891 slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; 892 break; 893 case USB_SPEED_FULL: 894 slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; 895 break; 896 case USB_SPEED_LOW: 897 slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; 898 break; 899 case USB_SPEED_WIRELESS: 900 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 901 return -EINVAL; 902 break; 903 default: 904 /* Speed was set earlier, this shouldn't happen. */ 905 BUG(); 906 } 907 /* Find the root hub port this device is under */ 908 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 909 top_dev = top_dev->parent) 910 /* Found device below root hub */; 911 slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); 912 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); 913 914 /* Is this a LS/FS device under a HS hub? */ 915 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && 916 udev->tt) { 917 slot_ctx->tt_info = udev->tt->hub->slot_id; 918 slot_ctx->tt_info |= udev->ttport << 8; 919 if (udev->tt->multi) 920 slot_ctx->dev_info |= DEV_MTT; 921 } 922 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 923 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 924 925 /* Step 4 - ring already allocated */ 926 /* Step 5 */ 927 ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); 928 /* 929 * XXX: Not sure about wireless USB devices. 930 */ 931 switch (udev->speed) { 932 case USB_SPEED_SUPER: 933 ep0_ctx->ep_info2 |= MAX_PACKET(512); 934 break; 935 case USB_SPEED_HIGH: 936 /* USB core guesses at a 64-byte max packet first for FS devices */ 937 case USB_SPEED_FULL: 938 ep0_ctx->ep_info2 |= MAX_PACKET(64); 939 break; 940 case USB_SPEED_LOW: 941 ep0_ctx->ep_info2 |= MAX_PACKET(8); 942 break; 943 case USB_SPEED_WIRELESS: 944 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 945 return -EINVAL; 946 break; 947 default: 948 /* New speed? */ 949 BUG(); 950 } 951 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ 952 ep0_ctx->ep_info2 |= MAX_BURST(0); 953 ep0_ctx->ep_info2 |= ERROR_COUNT(3); 954 955 ep0_ctx->deq = 956 dev->eps[0].ring->first_seg->dma; 957 ep0_ctx->deq |= dev->eps[0].ring->cycle_state; 958 959 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 960 961 return 0; 962} 963 964/* Return the polling or NAK interval. 965 * 966 * The polling interval is expressed in "microframes". If xHCI's Interval field 967 * is set to N, it will service the endpoint every 2^(Interval)*125us. 968 * 969 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval 970 * is set to 0. 971 */ 972static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, 973 struct usb_host_endpoint *ep) 974{ 975 unsigned int interval = 0; 976 977 switch (udev->speed) { 978 case USB_SPEED_HIGH: 979 /* Max NAK rate */ 980 if (usb_endpoint_xfer_control(&ep->desc) || 981 usb_endpoint_xfer_bulk(&ep->desc)) 982 interval = ep->desc.bInterval; 983 /* Fall through - SS and HS isoc/int have same decoding */ 984 case USB_SPEED_SUPER: 985 if (usb_endpoint_xfer_int(&ep->desc) || 986 usb_endpoint_xfer_isoc(&ep->desc)) { 987 if (ep->desc.bInterval == 0) 988 interval = 0; 989 else 990 interval = ep->desc.bInterval - 1; 991 if (interval > 15) 992 interval = 15; 993 if (interval != ep->desc.bInterval + 1) 994 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", 995 ep->desc.bEndpointAddress, 1 << interval); 996 } 997 break; 998 /* Convert bInterval (in 1-255 frames) to microframes and round down to 999 * nearest power of 2. 1000 */ 1001 case USB_SPEED_FULL: 1002 case USB_SPEED_LOW: 1003 if (usb_endpoint_xfer_int(&ep->desc) || 1004 usb_endpoint_xfer_isoc(&ep->desc)) { 1005 interval = fls(8*ep->desc.bInterval) - 1; 1006 if (interval > 10) 1007 interval = 10; 1008 if (interval < 3) 1009 interval = 3; 1010 if ((1 << interval) != 8*ep->desc.bInterval) 1011 dev_warn(&udev->dev, 1012 "ep %#x - rounding interval" 1013 " to %d microframes, " 1014 "ep desc says %d microframes\n", 1015 ep->desc.bEndpointAddress, 1016 1 << interval, 1017 8*ep->desc.bInterval); 1018 } 1019 break; 1020 default: 1021 BUG(); 1022 } 1023 return EP_INTERVAL(interval); 1024} 1025 1026/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps. 1027 * High speed endpoint descriptors can define "the number of additional 1028 * transaction opportunities per microframe", but that goes in the Max Burst 1029 * endpoint context field. 1030 */ 1031static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, 1032 struct usb_host_endpoint *ep) 1033{ 1034 if (udev->speed != USB_SPEED_SUPER || 1035 !usb_endpoint_xfer_isoc(&ep->desc)) 1036 return 0; 1037 return ep->ss_ep_comp.bmAttributes; 1038} 1039 1040static inline u32 xhci_get_endpoint_type(struct usb_device *udev, 1041 struct usb_host_endpoint *ep) 1042{ 1043 int in; 1044 u32 type; 1045 1046 in = usb_endpoint_dir_in(&ep->desc); 1047 if (usb_endpoint_xfer_control(&ep->desc)) { 1048 type = EP_TYPE(CTRL_EP); 1049 } else if (usb_endpoint_xfer_bulk(&ep->desc)) { 1050 if (in) 1051 type = EP_TYPE(BULK_IN_EP); 1052 else 1053 type = EP_TYPE(BULK_OUT_EP); 1054 } else if (usb_endpoint_xfer_isoc(&ep->desc)) { 1055 if (in) 1056 type = EP_TYPE(ISOC_IN_EP); 1057 else 1058 type = EP_TYPE(ISOC_OUT_EP); 1059 } else if (usb_endpoint_xfer_int(&ep->desc)) { 1060 if (in) 1061 type = EP_TYPE(INT_IN_EP); 1062 else 1063 type = EP_TYPE(INT_OUT_EP); 1064 } else { 1065 BUG(); 1066 } 1067 return type; 1068} 1069 1070/* Return the maximum endpoint service interval time (ESIT) payload. 1071 * Basically, this is the maxpacket size, multiplied by the burst size 1072 * and mult size. 1073 */ 1074static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, 1075 struct usb_device *udev, 1076 struct usb_host_endpoint *ep) 1077{ 1078 int max_burst; 1079 int max_packet; 1080 1081 /* Only applies for interrupt or isochronous endpoints */ 1082 if (usb_endpoint_xfer_control(&ep->desc) || 1083 usb_endpoint_xfer_bulk(&ep->desc)) 1084 return 0; 1085 1086 if (udev->speed == USB_SPEED_SUPER) 1087 return ep->ss_ep_comp.wBytesPerInterval; 1088 1089 max_packet = ep->desc.wMaxPacketSize & 0x3ff; 1090 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; 1091 /* A 0 in max burst means 1 transfer per ESIT */ 1092 return max_packet * (max_burst + 1); 1093} 1094 1095/* Set up an endpoint with one ring segment. Do not allocate stream rings. 1096 * Drivers will have to call usb_alloc_streams() to do that. 1097 */ 1098int xhci_endpoint_init(struct xhci_hcd *xhci, 1099 struct xhci_virt_device *virt_dev, 1100 struct usb_device *udev, 1101 struct usb_host_endpoint *ep, 1102 gfp_t mem_flags) 1103{ 1104 unsigned int ep_index; 1105 struct xhci_ep_ctx *ep_ctx; 1106 struct xhci_ring *ep_ring; 1107 unsigned int max_packet; 1108 unsigned int max_burst; 1109 u32 max_esit_payload; 1110 1111 ep_index = xhci_get_endpoint_index(&ep->desc); 1112 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1113 1114 /* Set up the endpoint ring */ 1115 virt_dev->eps[ep_index].new_ring = 1116 xhci_ring_alloc(xhci, 1, true, mem_flags); 1117 if (!virt_dev->eps[ep_index].new_ring) { 1118 /* Attempt to use the ring cache */ 1119 if (virt_dev->num_rings_cached == 0) 1120 return -ENOMEM; 1121 virt_dev->eps[ep_index].new_ring = 1122 virt_dev->ring_cache[virt_dev->num_rings_cached]; 1123 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; 1124 virt_dev->num_rings_cached--; 1125 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring); 1126 } 1127 virt_dev->eps[ep_index].skip = false; 1128 ep_ring = virt_dev->eps[ep_index].new_ring; 1129 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; 1130 1131 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 1132 ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep)); 1133 1134 /* FIXME dig Mult and streams info out of ep companion desc */ 1135 1136 /* Allow 3 retries for everything but isoc; 1137 * error count = 0 means infinite retries. 1138 */ 1139 if (!usb_endpoint_xfer_isoc(&ep->desc)) 1140 ep_ctx->ep_info2 = ERROR_COUNT(3); 1141 else 1142 ep_ctx->ep_info2 = ERROR_COUNT(1); 1143 1144 ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); 1145 1146 /* Set the max packet size and max burst */ 1147 switch (udev->speed) { 1148 case USB_SPEED_SUPER: 1149 max_packet = ep->desc.wMaxPacketSize; 1150 ep_ctx->ep_info2 |= MAX_PACKET(max_packet); 1151 /* dig out max burst from ep companion desc */ 1152 max_packet = ep->ss_ep_comp.bMaxBurst; 1153 if (!max_packet) 1154 xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n"); 1155 ep_ctx->ep_info2 |= MAX_BURST(max_packet); 1156 break; 1157 case USB_SPEED_HIGH: 1158 /* bits 11:12 specify the number of additional transaction 1159 * opportunities per microframe (USB 2.0, section 9.6.6) 1160 */ 1161 if (usb_endpoint_xfer_isoc(&ep->desc) || 1162 usb_endpoint_xfer_int(&ep->desc)) { 1163 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; 1164 ep_ctx->ep_info2 |= MAX_BURST(max_burst); 1165 } 1166 /* Fall through */ 1167 case USB_SPEED_FULL: 1168 case USB_SPEED_LOW: 1169 max_packet = ep->desc.wMaxPacketSize & 0x3ff; 1170 ep_ctx->ep_info2 |= MAX_PACKET(max_packet); 1171 break; 1172 default: 1173 BUG(); 1174 } 1175 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); 1176 ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload); 1177 1178 /* 1179 * XXX no idea how to calculate the average TRB buffer length for bulk 1180 * endpoints, as the driver gives us no clue how big each scatter gather 1181 * list entry (or buffer) is going to be. 1182 * 1183 * For isochronous and interrupt endpoints, we set it to the max 1184 * available, until we have new API in the USB core to allow drivers to 1185 * declare how much bandwidth they actually need. 1186 * 1187 * Normally, it would be calculated by taking the total of the buffer 1188 * lengths in the TD and then dividing by the number of TRBs in a TD, 1189 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't 1190 * use Event Data TRBs, and we don't chain in a link TRB on short 1191 * transfers, we're basically dividing by 1. 1192 */ 1193 ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload); 1194 1195 /* FIXME Debug endpoint context */ 1196 return 0; 1197} 1198 1199void xhci_endpoint_zero(struct xhci_hcd *xhci, 1200 struct xhci_virt_device *virt_dev, 1201 struct usb_host_endpoint *ep) 1202{ 1203 unsigned int ep_index; 1204 struct xhci_ep_ctx *ep_ctx; 1205 1206 ep_index = xhci_get_endpoint_index(&ep->desc); 1207 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1208 1209 ep_ctx->ep_info = 0; 1210 ep_ctx->ep_info2 = 0; 1211 ep_ctx->deq = 0; 1212 ep_ctx->tx_info = 0; 1213 /* Don't free the endpoint ring until the set interface or configuration 1214 * request succeeds. 1215 */ 1216} 1217 1218/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy. 1219 * Useful when you want to change one particular aspect of the endpoint and then 1220 * issue a configure endpoint command. 1221 */ 1222void xhci_endpoint_copy(struct xhci_hcd *xhci, 1223 struct xhci_container_ctx *in_ctx, 1224 struct xhci_container_ctx *out_ctx, 1225 unsigned int ep_index) 1226{ 1227 struct xhci_ep_ctx *out_ep_ctx; 1228 struct xhci_ep_ctx *in_ep_ctx; 1229 1230 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1231 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 1232 1233 in_ep_ctx->ep_info = out_ep_ctx->ep_info; 1234 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; 1235 in_ep_ctx->deq = out_ep_ctx->deq; 1236 in_ep_ctx->tx_info = out_ep_ctx->tx_info; 1237} 1238 1239/* Copy output xhci_slot_ctx to the input xhci_slot_ctx. 1240 * Useful when you want to change one particular aspect of the endpoint and then 1241 * issue a configure endpoint command. Only the context entries field matters, 1242 * but we'll copy the whole thing anyway. 1243 */ 1244void xhci_slot_copy(struct xhci_hcd *xhci, 1245 struct xhci_container_ctx *in_ctx, 1246 struct xhci_container_ctx *out_ctx) 1247{ 1248 struct xhci_slot_ctx *in_slot_ctx; 1249 struct xhci_slot_ctx *out_slot_ctx; 1250 1251 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 1252 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx); 1253 1254 in_slot_ctx->dev_info = out_slot_ctx->dev_info; 1255 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; 1256 in_slot_ctx->tt_info = out_slot_ctx->tt_info; 1257 in_slot_ctx->dev_state = out_slot_ctx->dev_state; 1258} 1259 1260/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ 1261static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) 1262{ 1263 int i; 1264 struct device *dev = xhci_to_hcd(xhci)->self.controller; 1265 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1266 1267 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); 1268 1269 if (!num_sp) 1270 return 0; 1271 1272 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); 1273 if (!xhci->scratchpad) 1274 goto fail_sp; 1275 1276 xhci->scratchpad->sp_array = 1277 pci_alloc_consistent(to_pci_dev(dev), 1278 num_sp * sizeof(u64), 1279 &xhci->scratchpad->sp_dma); 1280 if (!xhci->scratchpad->sp_array) 1281 goto fail_sp2; 1282 1283 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); 1284 if (!xhci->scratchpad->sp_buffers) 1285 goto fail_sp3; 1286 1287 xhci->scratchpad->sp_dma_buffers = 1288 kzalloc(sizeof(dma_addr_t) * num_sp, flags); 1289 1290 if (!xhci->scratchpad->sp_dma_buffers) 1291 goto fail_sp4; 1292 1293 xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; 1294 for (i = 0; i < num_sp; i++) { 1295 dma_addr_t dma; 1296 void *buf = pci_alloc_consistent(to_pci_dev(dev), 1297 xhci->page_size, &dma); 1298 if (!buf) 1299 goto fail_sp5; 1300 1301 xhci->scratchpad->sp_array[i] = dma; 1302 xhci->scratchpad->sp_buffers[i] = buf; 1303 xhci->scratchpad->sp_dma_buffers[i] = dma; 1304 } 1305 1306 return 0; 1307 1308 fail_sp5: 1309 for (i = i - 1; i >= 0; i--) { 1310 pci_free_consistent(to_pci_dev(dev), xhci->page_size, 1311 xhci->scratchpad->sp_buffers[i], 1312 xhci->scratchpad->sp_dma_buffers[i]); 1313 } 1314 kfree(xhci->scratchpad->sp_dma_buffers); 1315 1316 fail_sp4: 1317 kfree(xhci->scratchpad->sp_buffers); 1318 1319 fail_sp3: 1320 pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), 1321 xhci->scratchpad->sp_array, 1322 xhci->scratchpad->sp_dma); 1323 1324 fail_sp2: 1325 kfree(xhci->scratchpad); 1326 xhci->scratchpad = NULL; 1327 1328 fail_sp: 1329 return -ENOMEM; 1330} 1331 1332static void scratchpad_free(struct xhci_hcd *xhci) 1333{ 1334 int num_sp; 1335 int i; 1336 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1337 1338 if (!xhci->scratchpad) 1339 return; 1340 1341 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1342 1343 for (i = 0; i < num_sp; i++) { 1344 pci_free_consistent(pdev, xhci->page_size, 1345 xhci->scratchpad->sp_buffers[i], 1346 xhci->scratchpad->sp_dma_buffers[i]); 1347 } 1348 kfree(xhci->scratchpad->sp_dma_buffers); 1349 kfree(xhci->scratchpad->sp_buffers); 1350 pci_free_consistent(pdev, num_sp * sizeof(u64), 1351 xhci->scratchpad->sp_array, 1352 xhci->scratchpad->sp_dma); 1353 kfree(xhci->scratchpad); 1354 xhci->scratchpad = NULL; 1355} 1356 1357struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, 1358 bool allocate_in_ctx, bool allocate_completion, 1359 gfp_t mem_flags) 1360{ 1361 struct xhci_command *command; 1362 1363 command = kzalloc(sizeof(*command), mem_flags); 1364 if (!command) 1365 return NULL; 1366 1367 if (allocate_in_ctx) { 1368 command->in_ctx = 1369 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, 1370 mem_flags); 1371 if (!command->in_ctx) { 1372 kfree(command); 1373 return NULL; 1374 } 1375 } 1376 1377 if (allocate_completion) { 1378 command->completion = 1379 kzalloc(sizeof(struct completion), mem_flags); 1380 if (!command->completion) { 1381 xhci_free_container_ctx(xhci, command->in_ctx); 1382 kfree(command); 1383 return NULL; 1384 } 1385 init_completion(command->completion); 1386 } 1387 1388 command->status = 0; 1389 INIT_LIST_HEAD(&command->cmd_list); 1390 return command; 1391} 1392 1393void xhci_free_command(struct xhci_hcd *xhci, 1394 struct xhci_command *command) 1395{ 1396 xhci_free_container_ctx(xhci, 1397 command->in_ctx); 1398 kfree(command->completion); 1399 kfree(command); 1400} 1401 1402void xhci_mem_cleanup(struct xhci_hcd *xhci) 1403{ 1404 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1405 int size; 1406 int i; 1407 1408 /* Free the Event Ring Segment Table and the actual Event Ring */ 1409 if (xhci->ir_set) { 1410 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 1411 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); 1412 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); 1413 } 1414 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1415 if (xhci->erst.entries) 1416 pci_free_consistent(pdev, size, 1417 xhci->erst.entries, xhci->erst.erst_dma_addr); 1418 xhci->erst.entries = NULL; 1419 xhci_dbg(xhci, "Freed ERST\n"); 1420 if (xhci->event_ring) 1421 xhci_ring_free(xhci, xhci->event_ring); 1422 xhci->event_ring = NULL; 1423 xhci_dbg(xhci, "Freed event ring\n"); 1424 1425 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); 1426 if (xhci->cmd_ring) 1427 xhci_ring_free(xhci, xhci->cmd_ring); 1428 xhci->cmd_ring = NULL; 1429 xhci_dbg(xhci, "Freed command ring\n"); 1430 1431 for (i = 1; i < MAX_HC_SLOTS; ++i) 1432 xhci_free_virt_device(xhci, i); 1433 1434 if (xhci->segment_pool) 1435 dma_pool_destroy(xhci->segment_pool); 1436 xhci->segment_pool = NULL; 1437 xhci_dbg(xhci, "Freed segment pool\n"); 1438 1439 if (xhci->device_pool) 1440 dma_pool_destroy(xhci->device_pool); 1441 xhci->device_pool = NULL; 1442 xhci_dbg(xhci, "Freed device context pool\n"); 1443 1444 if (xhci->small_streams_pool) 1445 dma_pool_destroy(xhci->small_streams_pool); 1446 xhci->small_streams_pool = NULL; 1447 xhci_dbg(xhci, "Freed small stream array pool\n"); 1448 1449 if (xhci->medium_streams_pool) 1450 dma_pool_destroy(xhci->medium_streams_pool); 1451 xhci->medium_streams_pool = NULL; 1452 xhci_dbg(xhci, "Freed medium stream array pool\n"); 1453 1454 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); 1455 if (xhci->dcbaa) 1456 pci_free_consistent(pdev, sizeof(*xhci->dcbaa), 1457 xhci->dcbaa, xhci->dcbaa->dma); 1458 xhci->dcbaa = NULL; 1459 1460 scratchpad_free(xhci); 1461 xhci->page_size = 0; 1462 xhci->page_shift = 0; 1463} 1464 1465static int xhci_test_trb_in_td(struct xhci_hcd *xhci, 1466 struct xhci_segment *input_seg, 1467 union xhci_trb *start_trb, 1468 union xhci_trb *end_trb, 1469 dma_addr_t input_dma, 1470 struct xhci_segment *result_seg, 1471 char *test_name, int test_number) 1472{ 1473 unsigned long long start_dma; 1474 unsigned long long end_dma; 1475 struct xhci_segment *seg; 1476 1477 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); 1478 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); 1479 1480 seg = trb_in_td(input_seg, start_trb, end_trb, input_dma); 1481 if (seg != result_seg) { 1482 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", 1483 test_name, test_number); 1484 xhci_warn(xhci, "Tested TRB math w/ seg %p and " 1485 "input DMA 0x%llx\n", 1486 input_seg, 1487 (unsigned long long) input_dma); 1488 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " 1489 "ending TRB %p (0x%llx DMA)\n", 1490 start_trb, start_dma, 1491 end_trb, end_dma); 1492 xhci_warn(xhci, "Expected seg %p, got seg %p\n", 1493 result_seg, seg); 1494 return -1; 1495 } 1496 return 0; 1497} 1498 1499/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */ 1500static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) 1501{ 1502 struct { 1503 dma_addr_t input_dma; 1504 struct xhci_segment *result_seg; 1505 } simple_test_vector [] = { 1506 /* A zeroed DMA field should fail */ 1507 { 0, NULL }, 1508 /* One TRB before the ring start should fail */ 1509 { xhci->event_ring->first_seg->dma - 16, NULL }, 1510 /* One byte before the ring start should fail */ 1511 { xhci->event_ring->first_seg->dma - 1, NULL }, 1512 /* Starting TRB should succeed */ 1513 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, 1514 /* Ending TRB should succeed */ 1515 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, 1516 xhci->event_ring->first_seg }, 1517 /* One byte after the ring end should fail */ 1518 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, 1519 /* One TRB after the ring end should fail */ 1520 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, 1521 /* An address of all ones should fail */ 1522 { (dma_addr_t) (~0), NULL }, 1523 }; 1524 struct { 1525 struct xhci_segment *input_seg; 1526 union xhci_trb *start_trb; 1527 union xhci_trb *end_trb; 1528 dma_addr_t input_dma; 1529 struct xhci_segment *result_seg; 1530 } complex_test_vector [] = { 1531 /* Test feeding a valid DMA address from a different ring */ 1532 { .input_seg = xhci->event_ring->first_seg, 1533 .start_trb = xhci->event_ring->first_seg->trbs, 1534 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1535 .input_dma = xhci->cmd_ring->first_seg->dma, 1536 .result_seg = NULL, 1537 }, 1538 /* Test feeding a valid end TRB from a different ring */ 1539 { .input_seg = xhci->event_ring->first_seg, 1540 .start_trb = xhci->event_ring->first_seg->trbs, 1541 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1542 .input_dma = xhci->cmd_ring->first_seg->dma, 1543 .result_seg = NULL, 1544 }, 1545 /* Test feeding a valid start and end TRB from a different ring */ 1546 { .input_seg = xhci->event_ring->first_seg, 1547 .start_trb = xhci->cmd_ring->first_seg->trbs, 1548 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1549 .input_dma = xhci->cmd_ring->first_seg->dma, 1550 .result_seg = NULL, 1551 }, 1552 /* TRB in this ring, but after this TD */ 1553 { .input_seg = xhci->event_ring->first_seg, 1554 .start_trb = &xhci->event_ring->first_seg->trbs[0], 1555 .end_trb = &xhci->event_ring->first_seg->trbs[3], 1556 .input_dma = xhci->event_ring->first_seg->dma + 4*16, 1557 .result_seg = NULL, 1558 }, 1559 /* TRB in this ring, but before this TD */ 1560 { .input_seg = xhci->event_ring->first_seg, 1561 .start_trb = &xhci->event_ring->first_seg->trbs[3], 1562 .end_trb = &xhci->event_ring->first_seg->trbs[6], 1563 .input_dma = xhci->event_ring->first_seg->dma + 2*16, 1564 .result_seg = NULL, 1565 }, 1566 /* TRB in this ring, but after this wrapped TD */ 1567 { .input_seg = xhci->event_ring->first_seg, 1568 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1569 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1570 .input_dma = xhci->event_ring->first_seg->dma + 2*16, 1571 .result_seg = NULL, 1572 }, 1573 /* TRB in this ring, but before this wrapped TD */ 1574 { .input_seg = xhci->event_ring->first_seg, 1575 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1576 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1577 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, 1578 .result_seg = NULL, 1579 }, 1580 /* TRB not in this ring, and we have a wrapped TD */ 1581 { .input_seg = xhci->event_ring->first_seg, 1582 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1583 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1584 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, 1585 .result_seg = NULL, 1586 }, 1587 }; 1588 1589 unsigned int num_tests; 1590 int i, ret; 1591 1592 num_tests = ARRAY_SIZE(simple_test_vector); 1593 for (i = 0; i < num_tests; i++) { 1594 ret = xhci_test_trb_in_td(xhci, 1595 xhci->event_ring->first_seg, 1596 xhci->event_ring->first_seg->trbs, 1597 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1598 simple_test_vector[i].input_dma, 1599 simple_test_vector[i].result_seg, 1600 "Simple", i); 1601 if (ret < 0) 1602 return ret; 1603 } 1604 1605 num_tests = ARRAY_SIZE(complex_test_vector); 1606 for (i = 0; i < num_tests; i++) { 1607 ret = xhci_test_trb_in_td(xhci, 1608 complex_test_vector[i].input_seg, 1609 complex_test_vector[i].start_trb, 1610 complex_test_vector[i].end_trb, 1611 complex_test_vector[i].input_dma, 1612 complex_test_vector[i].result_seg, 1613 "Complex", i); 1614 if (ret < 0) 1615 return ret; 1616 } 1617 xhci_dbg(xhci, "TRB math tests passed.\n"); 1618 return 0; 1619} 1620 1621 1622int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 1623{ 1624 dma_addr_t dma; 1625 struct device *dev = xhci_to_hcd(xhci)->self.controller; 1626 unsigned int val, val2; 1627 u64 val_64; 1628 struct xhci_segment *seg; 1629 u32 page_size; 1630 int i; 1631 1632 page_size = xhci_readl(xhci, &xhci->op_regs->page_size); 1633 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); 1634 for (i = 0; i < 16; i++) { 1635 if ((0x1 & page_size) != 0) 1636 break; 1637 page_size = page_size >> 1; 1638 } 1639 if (i < 16) 1640 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); 1641 else 1642 xhci_warn(xhci, "WARN: no supported page size\n"); 1643 /* Use 4K pages, since that's common and the minimum the HC supports */ 1644 xhci->page_shift = 12; 1645 xhci->page_size = 1 << xhci->page_shift; 1646 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); 1647 1648 /* 1649 * Program the Number of Device Slots Enabled field in the CONFIG 1650 * register with the max value of slots the HC can handle. 1651 */ 1652 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); 1653 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", 1654 (unsigned int) val); 1655 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); 1656 val |= (val2 & ~HCS_SLOTS_MASK); 1657 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", 1658 (unsigned int) val); 1659 xhci_writel(xhci, val, &xhci->op_regs->config_reg); 1660 1661 /* 1662 * Section 5.4.8 - doorbell array must be 1663 * "physically contiguous and 64-byte (cache line) aligned". 1664 */ 1665 xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev), 1666 sizeof(*xhci->dcbaa), &dma); 1667 if (!xhci->dcbaa) 1668 goto fail; 1669 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); 1670 xhci->dcbaa->dma = dma; 1671 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 1672 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 1673 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); 1674 1675 /* 1676 * Initialize the ring segment pool. The ring must be a contiguous 1677 * structure comprised of TRBs. The TRBs must be 16 byte aligned, 1678 * however, the command ring segment needs 64-byte aligned segments, 1679 * so we pick the greater alignment need. 1680 */ 1681 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 1682 SEGMENT_SIZE, 64, xhci->page_size); 1683 1684 /* See Table 46 and Note on Figure 55 */ 1685 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 1686 2112, 64, xhci->page_size); 1687 if (!xhci->segment_pool || !xhci->device_pool) 1688 goto fail; 1689 1690 /* Linear stream context arrays don't have any boundary restrictions, 1691 * and only need to be 16-byte aligned. 1692 */ 1693 xhci->small_streams_pool = 1694 dma_pool_create("xHCI 256 byte stream ctx arrays", 1695 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); 1696 xhci->medium_streams_pool = 1697 dma_pool_create("xHCI 1KB stream ctx arrays", 1698 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); 1699 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE 1700 * will be allocated with pci_alloc_consistent() 1701 */ 1702 1703 if (!xhci->small_streams_pool || !xhci->medium_streams_pool) 1704 goto fail; 1705 1706 /* Set up the command ring to have one segments for now. */ 1707 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags); 1708 if (!xhci->cmd_ring) 1709 goto fail; 1710 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); 1711 xhci_dbg(xhci, "First segment DMA is 0x%llx\n", 1712 (unsigned long long)xhci->cmd_ring->first_seg->dma); 1713 1714 /* Set the address in the Command Ring Control register */ 1715 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 1716 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 1717 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 1718 xhci->cmd_ring->cycle_state; 1719 xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); 1720 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 1721 xhci_dbg_cmd_ptrs(xhci); 1722 1723 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 1724 val &= DBOFF_MASK; 1725 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" 1726 " from cap regs base addr\n", val); 1727 xhci->dba = (void *) xhci->cap_regs + val; 1728 xhci_dbg_regs(xhci); 1729 xhci_print_run_regs(xhci); 1730 /* Set ir_set to interrupt register set 0 */ 1731 xhci->ir_set = (void *) xhci->run_regs->ir_set; 1732 1733 /* 1734 * Event ring setup: Allocate a normal ring, but also setup 1735 * the event ring segment table (ERST). Section 4.9.3. 1736 */ 1737 xhci_dbg(xhci, "// Allocating event ring\n"); 1738 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); 1739 if (!xhci->event_ring) 1740 goto fail; 1741 if (xhci_check_trb_in_td_math(xhci, flags) < 0) 1742 goto fail; 1743 1744 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), 1745 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); 1746 if (!xhci->erst.entries) 1747 goto fail; 1748 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", 1749 (unsigned long long)dma); 1750 1751 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); 1752 xhci->erst.num_entries = ERST_NUM_SEGS; 1753 xhci->erst.erst_dma_addr = dma; 1754 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", 1755 xhci->erst.num_entries, 1756 xhci->erst.entries, 1757 (unsigned long long)xhci->erst.erst_dma_addr); 1758 1759 /* set ring base address and size for each segment table entry */ 1760 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 1761 struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 1762 entry->seg_addr = seg->dma; 1763 entry->seg_size = TRBS_PER_SEGMENT; 1764 entry->rsvd = 0; 1765 seg = seg->next; 1766 } 1767 1768 /* set ERST count with the number of entries in the segment table */ 1769 val = xhci_readl(xhci, &xhci->ir_set->erst_size); 1770 val &= ERST_SIZE_MASK; 1771 val |= ERST_NUM_SEGS; 1772 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", 1773 val); 1774 xhci_writel(xhci, val, &xhci->ir_set->erst_size); 1775 1776 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); 1777 /* set the segment table base address */ 1778 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 1779 (unsigned long long)xhci->erst.erst_dma_addr); 1780 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); 1781 val_64 &= ERST_PTR_MASK; 1782 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); 1783 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); 1784 1785 /* Set the event ring dequeue address */ 1786 xhci_set_hc_event_deq(xhci); 1787 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); 1788 xhci_print_ir_set(xhci, xhci->ir_set, 0); 1789 1790 /* 1791 * XXX: Might need to set the Interrupter Moderation Register to 1792 * something other than the default (~1ms minimum between interrupts). 1793 * See section 5.5.1.2. 1794 */ 1795 init_completion(&xhci->addr_dev); 1796 for (i = 0; i < MAX_HC_SLOTS; ++i) 1797 xhci->devs[i] = NULL; 1798 1799 if (scratchpad_alloc(xhci, flags)) 1800 goto fail; 1801 1802 return 0; 1803 1804fail: 1805 xhci_warn(xhci, "Couldn't initialize memory\n"); 1806 xhci_mem_cleanup(xhci); 1807 return -ENOMEM; 1808} 1809