cppi_dma.c revision 704a14854aaf9758a1248ea36a7d1b8cc42a4b3e
/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */

#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"


/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT:  the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */

/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data
 * (64 descriptors x 64 byte packets).
 */
#define NUM_TXCHAN_BD	64
#define NUM_RXCHAN_BD	64

static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef	CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}
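/* A note on the two helpers above:  the freelist is a simple LIFO
 * threaded through bd->next -- alloc pops the head, free pushes a new
 * one.  The descriptors stay mapped in the channel's dma_pool for the
 * life of the channel (see cppi_pool_init/cppi_pool_free below), so
 * neither helper touches an allocator and both are cheap in irq
 * context.
 */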
/*
 *  Start DMA controller
 *
 *  Initialize the DMA controller as necessary.
 */

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}

static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		if (!bd)
			continue;
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}

static int cppi_channel_abort(struct dma_channel *);

static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}
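/* Sizing note for cppi_pool_init() above:  each channel preallocates
 * NUM_TXCHAN_BD + 1 descriptors.  The extra one plausibly covers the
 * "last_processed" BD that the RX path keeps around between segments
 * (see cppi_rx_scan() below), so a full segment of NUM_RXCHAN_BD
 * packets can still be queued -- an educated guess, since the code
 * doesn't say.
 */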
static int __init cppi_controller_start(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);

	return 0;
}

/*
 *  Stop DMA controller
 *
 *  De-initialize the DMA controller as necessary.
 */
static int cppi_controller_stop(struct dma_controller *c)
{
	struct cppi		*controller;
	void __iomem		*tibase;
	int			i;

	controller = container_of(c, struct cppi, controller);

	tibase = controller->tibase;
	/* disable individual channel interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	DBG(1, "Tearing down RX and TX Channels\n");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* Only the TX side supports a proper teardown protocol, so we
	 * disable TX/RX CPPI only after the TX channels are cleaned up:
	 * TX CPPI must not be disabled before TX teardown completes.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);

	return 0;
}

/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}


/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		DBG(1, "re-allocating DMA%d %cX channel %p\n",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		DBG(1, "releasing idle DMA channel %p\n", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "RX DMA%d%s: %d left, csr %04x, "
			"%08x H%08x S%08x C%08x, "
			"B%08x L%08x %08x .. %08x"
			"\n",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "TX DMA%d%s: csr %04x, "
			"H%08x S%08x C%08x %08x, "
			"F%08x L%08x .. %08x"
			"\n",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}

/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}

#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}
#endif

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
	struct cppi_descriptor	*bd;

	if (!_dbg_level(level))
		return;
	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
#endif
}


/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef	RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush write buffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}
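/* Purely illustrative sketch, never called:  decode the two AUTOREQ
 * bits that cppi_autoreq_update() above would manipulate for one RX
 * channel.  The 0 = "never", 1 = "all but last", 3 = "always" encoding
 * is inferred from the #ifdef RNDIS_RX_IS_USABLE code above, not from
 * TI documentation.
 */
static inline u32 cppi_autoreq_mode(void __iomem *tibase, unsigned index)
{
	/* two bits per channel, channel 0 in bits 1:0 */
	return (musb_readl(tibase, DAVINCI_AUTOREQ_REG) >> (index * 2))
			& 0x3;
}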
/* Buffer enqueuing Logic:
 *
 * - RX builds new queues each time, to help handle routine "early
 *   termination" cases (faults, including errors and short reads)
 *   more correctly.
 *
 * - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model:  the goal will be
 * to append to the existing queue, processing completed "dma buffers"
 * (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a time"
 * "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave, too
 * (except for how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32	partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}

/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios:  (a) it's fed two
 * 300 byte packets back-to-back, and (b) it's fed two 512 byte packets
 * back-to-back.  (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300
 * bytes, and the next packet goes into a buffer that's queued later; while
 * (b) fills the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both
 *   packets are loaded into the buffer (with a 212 byte gap between them),
 *   and the next buffer queued will NOT get its 300 bytes of data.  (It
 *   seems like SOP/EOP are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way
 *   to reliably make both cases work, with software handling both cases
 *   correctly and at the significant penalty of needing an IRQ per packet.
 *   (The lack of I/O overlap can be slightly ameliorated by enabling double
 *   buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */


/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a)	peripheral mode ... since rndis peripherals could pad their
 *	writes to hosts, causing i/o failure; or we'd have to cope with
 *	a largely unknowable variety of host side protocol variants
 *  (b)	and short reads are NOT errors ... since full reads would
 *	cause those same i/o failures
 *  (c)	and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 * THEN
 *   try rx rndis mode
 *
 * Cost of heuristic failing:  RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
static bool cppi_rx_rndis = true;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
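/* Two illustrative sketches, neither called anywhere, pulling the
 * RNDIS-mode eligibility tests out of the transfer code for clarity.
 *
 * TX, from cppi_next_tx_segment() above:  a 3000 byte transfer at
 * maxpacket 512 qualifies (512 is a multiple of 64, the length fits
 * one BD, and 3000 % 512 != 0 means no terminating ZLP is needed); a
 * 4096 byte transfer is exactly eight packets, so it gets punted to
 * transparent mode to dodge the ZLP hiccup described above.
 */
static inline int cppi_tx_rndis_ok(unsigned maxpacket, size_t length)
{
	return (maxpacket & 0x3f) == 0		/* h/w constraint */
		&& length < 0xffff		/* fits one BD */
		&& (length % maxpacket) != 0;	/* no terminating ZLP */
}

/* RX, mirroring condition (c) of the heuristic above as checked in
 * cppi_next_rx_segment() below; the real test also requires peripheral
 * mode and the cppi_rx_rndis module parameter.  A 30208 byte read at
 * maxpacket 512 qualifies (59 full packets, not a 4KB multiple, under
 * 64KB); a 4096 byte read does not.
 */
static inline int cppi_rx_rndis_ok(size_t length, unsigned maxpacket)
{
	return length > maxpacket		/* more than one packet */
		&& (length & ~0xffff) == 0	/* under 64KB */
		&& (length & 0x0fff) != 0	/* not a 4KB multiple */
		&& (length & (maxpacket - 1)) == 0; /* whole packets only */
}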
/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes. So:  multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%x len %u %u/%u\n",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			addr, length, rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

#ifdef CONFIG_USB_MUSB_DEBUG
	if (_dbg_level(5)) {
		struct cppi_descriptor	*d;

		for (d = rx->head; d; d = d->next)
			cppi_dump_rxbd("S", d);
	}
#endif

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		DBG(2, "bufcnt%d underrun - %d (for %d)\n",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}
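/* Worked example of the segment built above:  a 1024 byte multipacket
 * read at maxpacket 512 (the mass storage case) queues two BDs -- the
 * head carrying OWN|SOP plus the 1024 byte request length in its
 * options word, the tail carrying OWN|EOP.  A short packet landing in
 * either BD terminates the whole segment, per the notes above.
 */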
/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel?  %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cX DMA%d not allocated!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}

static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE:  when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets
			 * may be active in the queue... TI docs didn't say,
			 * but CPPI ignores those BDs even though OWN is
			 * still set.
			 */
			completed = true;
			DBG(3, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racy, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}

void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem		*tibase;
	int			i, index;
	struct cppi		*cppi;
	struct musb_hw_ep	*hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME  need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (NULL == bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
				musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				/* Peripheral role never repurposes the
				 * endpoint, so immediate completion is
				 * safe.  Host role waits for the fifo
				 * to empty (TXPKTRDY irq) before going
				 * to the next queued bulk transfer.
				 */
				if (is_host_active(cppi->musb)) {
#if 0
					/* WORKAROUND because we may
					 * not always get TXPKTRDY ...
					 */
					int	csr;

					csr = musb_readw(hw_ep->regs,
						MUSB_TXCSR);
					if (csr & MUSB_TXCSR_TXPKTRDY)
#endif
						completed = false;
				}
				if (completed)
					musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel	*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}

/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi	*controller;

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	return &controller->controller;
}

/*
 *  Destroy a previously-instantiated DMA controller.
 */
void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	/* assert:  caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}

/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		int	enabled;

		/* mask interrupts raised to signal teardown complete.  */
		enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
				& (1 << cppi_ch->index);
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
					(1 << cppi_ch->index));

		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);
		musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/* re-enable interrupt */
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
					(1 << cppi_ch->index));

		/* While we scrub the TX state RAM, ensure that we clean
		 * up any interrupt that's currently asserted:
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 *    (write back mode)
		 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
		 *    (compare mode)
		 * Value written is compared (for bits 31:2) and when
		 * equal, interrupt is deasserted.
		 */
		cppi_reset_tx(tx_ram, 1);
		musb_writel(&tx_ram->tx_complete, 0, 0);

		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ...  we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE:  arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place.  */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}

/* TBD Queries:
 *
 * Power Management ... probably turn off cppi during suspend, restart;
 * check state ram?  Clocking is presumably shared with usb core.
 */