intel_mid_dma.c revision 03b96dca010145f3896abcd443b7fddb9813a0e6
1/* 2 * intel_mid_dma.c - Intel Langwell DMA Drivers 3 * 4 * Copyright (C) 2008-10 Intel Corp 5 * Author: Vinod Koul <vinod.koul@intel.com> 6 * The driver design is based on dw_dmac driver 7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; version 2 of the License. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License along 19 * with this program; if not, write to the Free Software Foundation, Inc., 20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 21 * 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * 24 * 25 */ 26#include <linux/pci.h> 27#include <linux/interrupt.h> 28#include <linux/pm_runtime.h> 29#include <linux/intel_mid_dma.h> 30 31#define MAX_CHAN 4 /*max ch across controllers*/ 32#include "intel_mid_dma_regs.h" 33 34#define INTEL_MID_DMAC1_ID 0x0814 35#define INTEL_MID_DMAC2_ID 0x0813 36#define INTEL_MID_GP_DMAC2_ID 0x0827 37#define INTEL_MFLD_DMAC1_ID 0x0830 38#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 39#define LNW_PERIPHRAL_MASK_SIZE 0x10 40#define LNW_PERIPHRAL_STATUS 0x0 41#define LNW_PERIPHRAL_MASK 0x8 42 43struct intel_mid_dma_probe_info { 44 u8 max_chan; 45 u8 ch_base; 46 u16 block_size; 47 u32 pimr_mask; 48}; 49 50#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ 51 ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ 52 .max_chan = (_max_chan), \ 53 .ch_base = (_ch_base), \ 54 .block_size = (_block_size), \ 55 .pimr_mask = (_pimr_mask), \ 56 }) 57 58/***************************************************************************** 59Utility Functions*/ 60/** 61 * get_ch_index - convert status to channel 62 * @status: status mask 63 * @base: dma ch base value 64 * 65 * Modify the status mask and return the channel index needing 66 * attention (or -1 if neither) 67 */ 68static int get_ch_index(int *status, unsigned int base) 69{ 70 int i; 71 for (i = 0; i < MAX_CHAN; i++) { 72 if (*status & (1 << (i + base))) { 73 *status = *status & ~(1 << (i + base)); 74 pr_debug("MDMA: index %d New status %x\n", i, *status); 75 return i; 76 } 77 } 78 return -1; 79} 80 81/** 82 * get_block_ts - calculates dma transaction length 83 * @len: dma transfer length 84 * @tx_width: dma transfer src width 85 * @block_size: dma controller max block size 86 * 87 * Based on src width calculate the DMA trsaction length in data items 88 * return data items or FFFF if exceeds max length for block 89 */ 90static int get_block_ts(int len, int tx_width, int block_size) 91{ 92 int byte_width = 0, block_ts = 0; 93 94 switch (tx_width) { 95 case LNW_DMA_WIDTH_8BIT: 96 byte_width = 1; 97 break; 98 case LNW_DMA_WIDTH_16BIT: 99 byte_width = 2; 100 break; 101 case LNW_DMA_WIDTH_32BIT: 102 default: 103 byte_width = 4; 104 break; 105 } 106 107 block_ts = len/byte_width; 108 if (block_ts > block_size) 109 block_ts = 0xFFFF; 110 return block_ts; 111} 112 113/***************************************************************************** 114DMAC1 interrupt Functions*/ 115 116/** 117 * dmac1_mask_periphral_intr - mask the periphral interrupt 118 * @midc: dma channel for which masking is required 119 * 120 * Masks the DMA periphral interrupt 121 * this is valid for DMAC1 family controllers only 122 * This controller should have periphral mask registers already mapped 123 */ 124static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc) 125{ 126 u32 pimr; 127 struct middma_device *mid = to_middma_device(midc->chan.device); 128 129 if (mid->pimr_mask) { 130 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 131 pimr |= mid->pimr_mask; 132 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 133 } 134 return; 135} 136 137/** 138 * dmac1_unmask_periphral_intr - unmask the periphral interrupt 139 * @midc: dma channel for which masking is required 140 * 141 * UnMasks the DMA periphral interrupt, 142 * this is valid for DMAC1 family controllers only 143 * This controller should have periphral mask registers already mapped 144 */ 145static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) 146{ 147 u32 pimr; 148 struct middma_device *mid = to_middma_device(midc->chan.device); 149 150 if (mid->pimr_mask) { 151 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 152 pimr &= ~mid->pimr_mask; 153 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 154 } 155 return; 156} 157 158/** 159 * enable_dma_interrupt - enable the periphral interrupt 160 * @midc: dma channel for which enable interrupt is required 161 * 162 * Enable the DMA periphral interrupt, 163 * this is valid for DMAC1 family controllers only 164 * This controller should have periphral mask registers already mapped 165 */ 166static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) 167{ 168 dmac1_unmask_periphral_intr(midc); 169 170 /*en ch interrupts*/ 171 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 172 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 173 return; 174} 175 176/** 177 * disable_dma_interrupt - disable the periphral interrupt 178 * @midc: dma channel for which disable interrupt is required 179 * 180 * Disable the DMA periphral interrupt, 181 * this is valid for DMAC1 family controllers only 182 * This controller should have periphral mask registers already mapped 183 */ 184static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 185{ 186 /*Check LPE PISR, make sure fwd is disabled*/ 187 dmac1_mask_periphral_intr(midc); 188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 190 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 191 return; 192} 193 194/***************************************************************************** 195DMA channel helper Functions*/ 196/** 197 * mid_desc_get - get a descriptor 198 * @midc: dma channel for which descriptor is required 199 * 200 * Obtain a descriptor for the channel. Returns NULL if none are free. 201 * Once the descriptor is returned it is private until put on another 202 * list or freed 203 */ 204static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) 205{ 206 struct intel_mid_dma_desc *desc, *_desc; 207 struct intel_mid_dma_desc *ret = NULL; 208 209 spin_lock_bh(&midc->lock); 210 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 211 if (async_tx_test_ack(&desc->txd)) { 212 list_del(&desc->desc_node); 213 ret = desc; 214 break; 215 } 216 } 217 spin_unlock_bh(&midc->lock); 218 return ret; 219} 220 221/** 222 * mid_desc_put - put a descriptor 223 * @midc: dma channel for which descriptor is required 224 * @desc: descriptor to put 225 * 226 * Return a descriptor from lwn_desc_get back to the free pool 227 */ 228static void midc_desc_put(struct intel_mid_dma_chan *midc, 229 struct intel_mid_dma_desc *desc) 230{ 231 if (desc) { 232 spin_lock_bh(&midc->lock); 233 list_add_tail(&desc->desc_node, &midc->free_list); 234 spin_unlock_bh(&midc->lock); 235 } 236} 237/** 238 * midc_dostart - begin a DMA transaction 239 * @midc: channel for which txn is to be started 240 * @first: first descriptor of series 241 * 242 * Load a transaction into the engine. This must be called with midc->lock 243 * held and bh disabled. 244 */ 245static void midc_dostart(struct intel_mid_dma_chan *midc, 246 struct intel_mid_dma_desc *first) 247{ 248 struct middma_device *mid = to_middma_device(midc->chan.device); 249 250 /* channel is idle */ 251 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { 252 /*error*/ 253 pr_err("ERR_MDMA: channel is busy in start\n"); 254 /* The tasklet will hopefully advance the queue... */ 255 return; 256 } 257 midc->busy = true; 258 /*write registers and en*/ 259 iowrite32(first->sar, midc->ch_regs + SAR); 260 iowrite32(first->dar, midc->ch_regs + DAR); 261 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 262 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 263 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); 264 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); 265 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 266 (int)first->sar, (int)first->dar, first->cfg_hi, 267 first->cfg_lo, first->ctl_hi, first->ctl_lo); 268 269 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 270 first->status = DMA_IN_PROGRESS; 271} 272 273/** 274 * midc_descriptor_complete - process completed descriptor 275 * @midc: channel owning the descriptor 276 * @desc: the descriptor itself 277 * 278 * Process a completed descriptor and perform any callbacks upon 279 * the completion. The completion handling drops the lock during the 280 * callbacks but must be called with the lock held. 281 */ 282static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, 283 struct intel_mid_dma_desc *desc) 284{ 285 struct dma_async_tx_descriptor *txd = &desc->txd; 286 dma_async_tx_callback callback_txd = NULL; 287 void *param_txd = NULL; 288 289 midc->completed = txd->cookie; 290 callback_txd = txd->callback; 291 param_txd = txd->callback_param; 292 293 list_move(&desc->desc_node, &midc->free_list); 294 midc->busy = false; 295 spin_unlock_bh(&midc->lock); 296 if (callback_txd) { 297 pr_debug("MDMA: TXD callback set ... calling\n"); 298 callback_txd(param_txd); 299 spin_lock_bh(&midc->lock); 300 return; 301 } 302 spin_lock_bh(&midc->lock); 303 304} 305/** 306 * midc_scan_descriptors - check the descriptors in channel 307 * mark completed when tx is completete 308 * @mid: device 309 * @midc: channel to scan 310 * 311 * Walk the descriptor chain for the device and process any entries 312 * that are complete. 313 */ 314static void midc_scan_descriptors(struct middma_device *mid, 315 struct intel_mid_dma_chan *midc) 316{ 317 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; 318 319 /*tx is complete*/ 320 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 321 if (desc->status == DMA_IN_PROGRESS) { 322 desc->status = DMA_SUCCESS; 323 midc_descriptor_complete(midc, desc); 324 } 325 } 326 return; 327} 328 329/***************************************************************************** 330DMA engine callback Functions*/ 331/** 332 * intel_mid_dma_tx_submit - callback to submit DMA transaction 333 * @tx: dma engine descriptor 334 * 335 * Submit the DMA trasaction for this descriptor, start if ch idle 336 */ 337static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) 338{ 339 struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); 340 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); 341 dma_cookie_t cookie; 342 343 spin_lock_bh(&midc->lock); 344 cookie = midc->chan.cookie; 345 346 if (++cookie < 0) 347 cookie = 1; 348 349 midc->chan.cookie = cookie; 350 desc->txd.cookie = cookie; 351 352 353 if (list_empty(&midc->active_list)) { 354 midc_dostart(midc, desc); 355 list_add_tail(&desc->desc_node, &midc->active_list); 356 } else { 357 list_add_tail(&desc->desc_node, &midc->queue); 358 } 359 spin_unlock_bh(&midc->lock); 360 361 return cookie; 362} 363 364/** 365 * intel_mid_dma_issue_pending - callback to issue pending txn 366 * @chan: chan where pending trascation needs to be checked and submitted 367 * 368 * Call for scan to issue pending descriptors 369 */ 370static void intel_mid_dma_issue_pending(struct dma_chan *chan) 371{ 372 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 373 374 spin_lock_bh(&midc->lock); 375 if (!list_empty(&midc->queue)) 376 midc_scan_descriptors(to_middma_device(chan->device), midc); 377 spin_unlock_bh(&midc->lock); 378} 379 380/** 381 * intel_mid_dma_tx_status - Return status of txn 382 * @chan: chan for where status needs to be checked 383 * @cookie: cookie for txn 384 * @txstate: DMA txn state 385 * 386 * Return status of DMA txn 387 */ 388static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, 389 dma_cookie_t cookie, 390 struct dma_tx_state *txstate) 391{ 392 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 393 dma_cookie_t last_used; 394 dma_cookie_t last_complete; 395 int ret; 396 397 last_complete = midc->completed; 398 last_used = chan->cookie; 399 400 ret = dma_async_is_complete(cookie, last_complete, last_used); 401 if (ret != DMA_SUCCESS) { 402 midc_scan_descriptors(to_middma_device(chan->device), midc); 403 404 last_complete = midc->completed; 405 last_used = chan->cookie; 406 407 ret = dma_async_is_complete(cookie, last_complete, last_used); 408 } 409 410 if (txstate) { 411 txstate->last = last_complete; 412 txstate->used = last_used; 413 txstate->residue = 0; 414 } 415 return ret; 416} 417 418/** 419 * intel_mid_dma_device_control - DMA device control 420 * @chan: chan for DMA control 421 * @cmd: control cmd 422 * @arg: cmd arg value 423 * 424 * Perform DMA control command 425 */ 426static int intel_mid_dma_device_control(struct dma_chan *chan, 427 enum dma_ctrl_cmd cmd, unsigned long arg) 428{ 429 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 430 struct middma_device *mid = to_middma_device(chan->device); 431 struct intel_mid_dma_desc *desc, *_desc; 432 LIST_HEAD(list); 433 434 if (cmd != DMA_TERMINATE_ALL) 435 return -ENXIO; 436 437 spin_lock_bh(&midc->lock); 438 if (midc->busy == false) { 439 spin_unlock_bh(&midc->lock); 440 return 0; 441 } 442 list_splice_init(&midc->free_list, &list); 443 midc->descs_allocated = 0; 444 midc->slave = NULL; 445 446 /* Disable interrupts */ 447 disable_dma_interrupt(midc); 448 449 spin_unlock_bh(&midc->lock); 450 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 451 pr_debug("MDMA: freeing descriptor %p\n", desc); 452 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 453 } 454 return 0; 455} 456 457/** 458 * intel_mid_dma_prep_slave_sg - Prep slave sg txn 459 * @chan: chan for DMA transfer 460 * @sgl: scatter gather list 461 * @sg_len: length of sg txn 462 * @direction: DMA transfer dirtn 463 * @flags: DMA flags 464 * 465 * Do DMA sg txn: NOT supported now 466 */ 467static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 468 struct dma_chan *chan, struct scatterlist *sgl, 469 unsigned int sg_len, enum dma_data_direction direction, 470 unsigned long flags) 471{ 472 /*not supported now*/ 473 return NULL; 474} 475 476/** 477 * intel_mid_dma_prep_memcpy - Prep memcpy txn 478 * @chan: chan for DMA transfer 479 * @dest: destn address 480 * @src: src address 481 * @len: DMA transfer len 482 * @flags: DMA flags 483 * 484 * Perform a DMA memcpy. Note we support slave periphral DMA transfers only 485 * The periphral txn details should be filled in slave structure properly 486 * Returns the descriptor for this txn 487 */ 488static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( 489 struct dma_chan *chan, dma_addr_t dest, 490 dma_addr_t src, size_t len, unsigned long flags) 491{ 492 struct intel_mid_dma_chan *midc; 493 struct intel_mid_dma_desc *desc = NULL; 494 struct intel_mid_dma_slave *mids; 495 union intel_mid_dma_ctl_lo ctl_lo; 496 union intel_mid_dma_ctl_hi ctl_hi; 497 union intel_mid_dma_cfg_lo cfg_lo; 498 union intel_mid_dma_cfg_hi cfg_hi; 499 enum intel_mid_dma_width width = 0; 500 501 pr_debug("MDMA: Prep for memcpy\n"); 502 WARN_ON(!chan); 503 if (!len) 504 return NULL; 505 506 mids = chan->private; 507 WARN_ON(!mids); 508 509 midc = to_intel_mid_dma_chan(chan); 510 WARN_ON(!midc); 511 512 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 513 midc->dma->pci_id, midc->ch_id, len); 514 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 515 mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); 516 517 /*calculate CFG_LO*/ 518 if (mids->hs_mode == LNW_DMA_SW_HS) { 519 cfg_lo.cfg_lo = 0; 520 cfg_lo.cfgx.hs_sel_dst = 1; 521 cfg_lo.cfgx.hs_sel_src = 1; 522 } else if (mids->hs_mode == LNW_DMA_HW_HS) 523 cfg_lo.cfg_lo = 0x00000; 524 525 /*calculate CFG_HI*/ 526 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 527 /*SW HS only*/ 528 cfg_hi.cfg_hi = 0; 529 } else { 530 cfg_hi.cfg_hi = 0; 531 if (midc->dma->pimr_mask) { 532 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 533 cfg_hi.cfgx.fifo_mode = 1; 534 if (mids->dirn == DMA_TO_DEVICE) { 535 cfg_hi.cfgx.src_per = 0; 536 if (mids->device_instance == 0) 537 cfg_hi.cfgx.dst_per = 3; 538 if (mids->device_instance == 1) 539 cfg_hi.cfgx.dst_per = 1; 540 } else if (mids->dirn == DMA_FROM_DEVICE) { 541 if (mids->device_instance == 0) 542 cfg_hi.cfgx.src_per = 2; 543 if (mids->device_instance == 1) 544 cfg_hi.cfgx.src_per = 0; 545 cfg_hi.cfgx.dst_per = 0; 546 } 547 } else { 548 cfg_hi.cfgx.protctl = 0x1; /*default value*/ 549 cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = 550 midc->ch_id - midc->dma->chan_base; 551 } 552 } 553 554 /*calculate CTL_HI*/ 555 ctl_hi.ctlx.reser = 0; 556 width = mids->src_width; 557 558 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 559 pr_debug("MDMA:calc len %d for block size %d\n", 560 ctl_hi.ctlx.block_ts, midc->dma->block_size); 561 /*calculate CTL_LO*/ 562 ctl_lo.ctl_lo = 0; 563 ctl_lo.ctlx.int_en = 1; 564 ctl_lo.ctlx.dst_tr_width = mids->dst_width; 565 ctl_lo.ctlx.src_tr_width = mids->src_width; 566 ctl_lo.ctlx.dst_msize = mids->src_msize; 567 ctl_lo.ctlx.src_msize = mids->dst_msize; 568 569 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 570 ctl_lo.ctlx.tt_fc = 0; 571 ctl_lo.ctlx.sinc = 0; 572 ctl_lo.ctlx.dinc = 0; 573 } else { 574 if (mids->dirn == DMA_TO_DEVICE) { 575 ctl_lo.ctlx.sinc = 0; 576 ctl_lo.ctlx.dinc = 2; 577 ctl_lo.ctlx.tt_fc = 1; 578 } else if (mids->dirn == DMA_FROM_DEVICE) { 579 ctl_lo.ctlx.sinc = 2; 580 ctl_lo.ctlx.dinc = 0; 581 ctl_lo.ctlx.tt_fc = 2; 582 } 583 } 584 585 pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", 586 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); 587 588 enable_dma_interrupt(midc); 589 590 desc = midc_desc_get(midc); 591 if (desc == NULL) 592 goto err_desc_get; 593 desc->sar = src; 594 desc->dar = dest ; 595 desc->len = len; 596 desc->cfg_hi = cfg_hi.cfg_hi; 597 desc->cfg_lo = cfg_lo.cfg_lo; 598 desc->ctl_lo = ctl_lo.ctl_lo; 599 desc->ctl_hi = ctl_hi.ctl_hi; 600 desc->width = width; 601 desc->dirn = mids->dirn; 602 return &desc->txd; 603 604err_desc_get: 605 pr_err("ERR_MDMA: Failed to get desc\n"); 606 midc_desc_put(midc, desc); 607 return NULL; 608} 609 610/** 611 * intel_mid_dma_free_chan_resources - Frees dma resources 612 * @chan: chan requiring attention 613 * 614 * Frees the allocated resources on this DMA chan 615 */ 616static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) 617{ 618 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 619 struct middma_device *mid = to_middma_device(chan->device); 620 struct intel_mid_dma_desc *desc, *_desc; 621 622 if (true == midc->busy) { 623 /*trying to free ch in use!!!!!*/ 624 pr_err("ERR_MDMA: trying to free ch in use\n"); 625 } 626 pm_runtime_put(&mid->pdev->dev); 627 spin_lock_bh(&midc->lock); 628 midc->descs_allocated = 0; 629 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 630 list_del(&desc->desc_node); 631 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 632 } 633 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 634 list_del(&desc->desc_node); 635 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 636 } 637 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { 638 list_del(&desc->desc_node); 639 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 640 } 641 spin_unlock_bh(&midc->lock); 642 midc->in_use = false; 643 midc->busy = false; 644 /* Disable CH interrupts */ 645 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 646 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); 647} 648 649/** 650 * intel_mid_dma_alloc_chan_resources - Allocate dma resources 651 * @chan: chan requiring attention 652 * 653 * Allocates DMA resources on this chan 654 * Return the descriptors allocated 655 */ 656static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) 657{ 658 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 659 struct middma_device *mid = to_middma_device(chan->device); 660 struct intel_mid_dma_desc *desc; 661 dma_addr_t phys; 662 int i = 0; 663 664 pm_runtime_get_sync(&mid->pdev->dev); 665 666 if (mid->state == SUSPENDED) { 667 if (dma_resume(mid->pdev)) { 668 pr_err("ERR_MDMA: resume failed"); 669 return -EFAULT; 670 } 671 } 672 673 /* ASSERT: channel is idle */ 674 if (test_ch_en(mid->dma_base, midc->ch_id)) { 675 /*ch is not idle*/ 676 pr_err("ERR_MDMA: ch not idle\n"); 677 pm_runtime_put(&mid->pdev->dev); 678 return -EIO; 679 } 680 midc->completed = chan->cookie = 1; 681 682 spin_lock_bh(&midc->lock); 683 while (midc->descs_allocated < DESCS_PER_CHANNEL) { 684 spin_unlock_bh(&midc->lock); 685 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 686 if (!desc) { 687 pr_err("ERR_MDMA: desc failed\n"); 688 pm_runtime_put(&mid->pdev->dev); 689 return -ENOMEM; 690 /*check*/ 691 } 692 dma_async_tx_descriptor_init(&desc->txd, chan); 693 desc->txd.tx_submit = intel_mid_dma_tx_submit; 694 desc->txd.flags = DMA_CTRL_ACK; 695 desc->txd.phys = phys; 696 spin_lock_bh(&midc->lock); 697 i = ++midc->descs_allocated; 698 list_add_tail(&desc->desc_node, &midc->free_list); 699 } 700 spin_unlock_bh(&midc->lock); 701 midc->in_use = true; 702 midc->busy = false; 703 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 704 return i; 705} 706 707/** 708 * midc_handle_error - Handle DMA txn error 709 * @mid: controller where error occured 710 * @midc: chan where error occured 711 * 712 * Scan the descriptor for error 713 */ 714static void midc_handle_error(struct middma_device *mid, 715 struct intel_mid_dma_chan *midc) 716{ 717 midc_scan_descriptors(mid, midc); 718} 719 720/** 721 * dma_tasklet - DMA interrupt tasklet 722 * @data: tasklet arg (the controller structure) 723 * 724 * Scan the controller for interrupts for completion/error 725 * Clear the interrupt and call for handling completion/error 726 */ 727static void dma_tasklet(unsigned long data) 728{ 729 struct middma_device *mid = NULL; 730 struct intel_mid_dma_chan *midc = NULL; 731 u32 status; 732 int i; 733 734 mid = (struct middma_device *)data; 735 if (mid == NULL) { 736 pr_err("ERR_MDMA: tasklet Null param\n"); 737 return; 738 } 739 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 740 status = ioread32(mid->dma_base + RAW_TFR); 741 pr_debug("MDMA:RAW_TFR %x\n", status); 742 status &= mid->intr_mask; 743 while (status) { 744 /*txn interrupt*/ 745 i = get_ch_index(&status, mid->chan_base); 746 if (i < 0) { 747 pr_err("ERR_MDMA:Invalid ch index %x\n", i); 748 return; 749 } 750 midc = &mid->ch[i]; 751 if (midc == NULL) { 752 pr_err("ERR_MDMA:Null param midc\n"); 753 return; 754 } 755 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 756 status, midc->ch_id, i); 757 /*clearing this interrupts first*/ 758 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 759 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); 760 761 spin_lock_bh(&midc->lock); 762 midc_scan_descriptors(mid, midc); 763 pr_debug("MDMA:Scan of desc... complete, unmasking\n"); 764 iowrite32(UNMASK_INTR_REG(midc->ch_id), 765 mid->dma_base + MASK_TFR); 766 spin_unlock_bh(&midc->lock); 767 } 768 769 status = ioread32(mid->dma_base + RAW_ERR); 770 status &= mid->intr_mask; 771 while (status) { 772 /*err interrupt*/ 773 i = get_ch_index(&status, mid->chan_base); 774 if (i < 0) { 775 pr_err("ERR_MDMA:Invalid ch index %x\n", i); 776 return; 777 } 778 midc = &mid->ch[i]; 779 if (midc == NULL) { 780 pr_err("ERR_MDMA:Null param midc\n"); 781 return; 782 } 783 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 784 status, midc->ch_id, i); 785 786 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); 787 spin_lock_bh(&midc->lock); 788 midc_handle_error(mid, midc); 789 iowrite32(UNMASK_INTR_REG(midc->ch_id), 790 mid->dma_base + MASK_ERR); 791 spin_unlock_bh(&midc->lock); 792 } 793 pr_debug("MDMA:Exiting takslet...\n"); 794 return; 795} 796 797static void dma_tasklet1(unsigned long data) 798{ 799 pr_debug("MDMA:in takslet1...\n"); 800 return dma_tasklet(data); 801} 802 803static void dma_tasklet2(unsigned long data) 804{ 805 pr_debug("MDMA:in takslet2...\n"); 806 return dma_tasklet(data); 807} 808 809/** 810 * intel_mid_dma_interrupt - DMA ISR 811 * @irq: IRQ where interrupt occurred 812 * @data: ISR cllback data (the controller structure) 813 * 814 * See if this is our interrupt if so then schedule the tasklet 815 * otherwise ignore 816 */ 817static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 818{ 819 struct middma_device *mid = data; 820 u32 tfr_status, err_status; 821 int call_tasklet = 0; 822 823 tfr_status = ioread32(mid->dma_base + RAW_TFR); 824 err_status = ioread32(mid->dma_base + RAW_ERR); 825 if (!tfr_status && !err_status) 826 return IRQ_NONE; 827 828 /*DMA Interrupt*/ 829 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 830 if (!mid) { 831 pr_err("ERR_MDMA:null pointer mid\n"); 832 return -EINVAL; 833 } 834 835 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 836 tfr_status &= mid->intr_mask; 837 if (tfr_status) { 838 /*need to disable intr*/ 839 iowrite32((tfr_status << 8), mid->dma_base + MASK_TFR); 840 pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 841 call_tasklet = 1; 842 } 843 err_status &= mid->intr_mask; 844 if (err_status) { 845 iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); 846 call_tasklet = 1; 847 } 848 if (call_tasklet) 849 tasklet_schedule(&mid->tasklet); 850 851 return IRQ_HANDLED; 852} 853 854static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) 855{ 856 return intel_mid_dma_interrupt(irq, data); 857} 858 859static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) 860{ 861 return intel_mid_dma_interrupt(irq, data); 862} 863 864/** 865 * mid_setup_dma - Setup the DMA controller 866 * @pdev: Controller PCI device structure 867 * 868 * Initilize the DMA controller, channels, registers with DMA engine, 869 * ISR. Initilize DMA controller channels. 870 */ 871static int mid_setup_dma(struct pci_dev *pdev) 872{ 873 struct middma_device *dma = pci_get_drvdata(pdev); 874 int err, i; 875 876 /* DMA coherent memory pool for DMA descriptor allocations */ 877 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, 878 sizeof(struct intel_mid_dma_desc), 879 32, 0); 880 if (NULL == dma->dma_pool) { 881 pr_err("ERR_MDMA:pci_pool_create failed\n"); 882 err = -ENOMEM; 883 kfree(dma); 884 goto err_dma_pool; 885 } 886 887 INIT_LIST_HEAD(&dma->common.channels); 888 dma->pci_id = pdev->device; 889 if (dma->pimr_mask) { 890 dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, 891 LNW_PERIPHRAL_MASK_SIZE); 892 if (dma->mask_reg == NULL) { 893 pr_err("ERR_MDMA:Cant map periphral intr space !!\n"); 894 return -ENOMEM; 895 } 896 } else 897 dma->mask_reg = NULL; 898 899 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 900 /*init CH structures*/ 901 dma->intr_mask = 0; 902 dma->state = RUNNING; 903 for (i = 0; i < dma->max_chan; i++) { 904 struct intel_mid_dma_chan *midch = &dma->ch[i]; 905 906 midch->chan.device = &dma->common; 907 midch->chan.cookie = 1; 908 midch->chan.chan_id = i; 909 midch->ch_id = dma->chan_base + i; 910 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 911 912 midch->dma_base = dma->dma_base; 913 midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; 914 midch->dma = dma; 915 dma->intr_mask |= 1 << (dma->chan_base + i); 916 spin_lock_init(&midch->lock); 917 918 INIT_LIST_HEAD(&midch->active_list); 919 INIT_LIST_HEAD(&midch->queue); 920 INIT_LIST_HEAD(&midch->free_list); 921 /*mask interrupts*/ 922 iowrite32(MASK_INTR_REG(midch->ch_id), 923 dma->dma_base + MASK_BLOCK); 924 iowrite32(MASK_INTR_REG(midch->ch_id), 925 dma->dma_base + MASK_SRC_TRAN); 926 iowrite32(MASK_INTR_REG(midch->ch_id), 927 dma->dma_base + MASK_DST_TRAN); 928 iowrite32(MASK_INTR_REG(midch->ch_id), 929 dma->dma_base + MASK_ERR); 930 iowrite32(MASK_INTR_REG(midch->ch_id), 931 dma->dma_base + MASK_TFR); 932 933 disable_dma_interrupt(midch); 934 list_add_tail(&midch->chan.device_node, &dma->common.channels); 935 } 936 pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); 937 938 /*init dma structure*/ 939 dma_cap_zero(dma->common.cap_mask); 940 dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); 941 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 942 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 943 dma->common.dev = &pdev->dev; 944 dma->common.chancnt = dma->max_chan; 945 946 dma->common.device_alloc_chan_resources = 947 intel_mid_dma_alloc_chan_resources; 948 dma->common.device_free_chan_resources = 949 intel_mid_dma_free_chan_resources; 950 951 dma->common.device_tx_status = intel_mid_dma_tx_status; 952 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; 953 dma->common.device_issue_pending = intel_mid_dma_issue_pending; 954 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; 955 dma->common.device_control = intel_mid_dma_device_control; 956 957 /*enable dma cntrl*/ 958 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); 959 960 /*register irq */ 961 if (dma->pimr_mask) { 962 pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 963 err = request_irq(pdev->irq, intel_mid_dma_interrupt1, 964 IRQF_SHARED, "INTEL_MID_DMAC1", dma); 965 if (0 != err) 966 goto err_irq; 967 } else { 968 dma->intr_mask = 0x03; 969 pr_debug("MDMA:Requesting irq for DMAC2\n"); 970 err = request_irq(pdev->irq, intel_mid_dma_interrupt2, 971 IRQF_SHARED, "INTEL_MID_DMAC2", dma); 972 if (0 != err) 973 goto err_irq; 974 } 975 /*register device w/ engine*/ 976 err = dma_async_device_register(&dma->common); 977 if (0 != err) { 978 pr_err("ERR_MDMA:device_register failed: %d\n", err); 979 goto err_engine; 980 } 981 if (dma->pimr_mask) { 982 pr_debug("setting up tasklet1 for DMAC1\n"); 983 tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); 984 } else { 985 pr_debug("setting up tasklet2 for DMAC2\n"); 986 tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); 987 } 988 return 0; 989 990err_engine: 991 free_irq(pdev->irq, dma); 992err_irq: 993 pci_pool_destroy(dma->dma_pool); 994 kfree(dma); 995err_dma_pool: 996 pr_err("ERR_MDMA:setup_dma failed: %d\n", err); 997 return err; 998 999} 1000 1001/** 1002 * middma_shutdown - Shutdown the DMA controller 1003 * @pdev: Controller PCI device structure 1004 * 1005 * Called by remove 1006 * Unregister DMa controller, clear all structures and free interrupt 1007 */ 1008static void middma_shutdown(struct pci_dev *pdev) 1009{ 1010 struct middma_device *device = pci_get_drvdata(pdev); 1011 1012 dma_async_device_unregister(&device->common); 1013 pci_pool_destroy(device->dma_pool); 1014 if (device->mask_reg) 1015 iounmap(device->mask_reg); 1016 if (device->dma_base) 1017 iounmap(device->dma_base); 1018 free_irq(pdev->irq, device); 1019 return; 1020} 1021 1022/** 1023 * intel_mid_dma_probe - PCI Probe 1024 * @pdev: Controller PCI device structure 1025 * @id: pci device id structure 1026 * 1027 * Initilize the PCI device, map BARs, query driver data. 1028 * Call setup_dma to complete contoller and chan initilzation 1029 */ 1030static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, 1031 const struct pci_device_id *id) 1032{ 1033 struct middma_device *device; 1034 u32 base_addr, bar_size; 1035 struct intel_mid_dma_probe_info *info; 1036 int err; 1037 1038 pr_debug("MDMA: probe for %x\n", pdev->device); 1039 info = (void *)id->driver_data; 1040 pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n", 1041 info->max_chan, info->ch_base, 1042 info->block_size, info->pimr_mask); 1043 1044 err = pci_enable_device(pdev); 1045 if (err) 1046 goto err_enable_device; 1047 1048 err = pci_request_regions(pdev, "intel_mid_dmac"); 1049 if (err) 1050 goto err_request_regions; 1051 1052 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1053 if (err) 1054 goto err_set_dma_mask; 1055 1056 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1057 if (err) 1058 goto err_set_dma_mask; 1059 1060 device = kzalloc(sizeof(*device), GFP_KERNEL); 1061 if (!device) { 1062 pr_err("ERR_MDMA:kzalloc failed probe\n"); 1063 err = -ENOMEM; 1064 goto err_kzalloc; 1065 } 1066 device->pdev = pci_dev_get(pdev); 1067 1068 base_addr = pci_resource_start(pdev, 0); 1069 bar_size = pci_resource_len(pdev, 0); 1070 device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); 1071 if (!device->dma_base) { 1072 pr_err("ERR_MDMA:ioremap failed\n"); 1073 err = -ENOMEM; 1074 goto err_ioremap; 1075 } 1076 pci_set_drvdata(pdev, device); 1077 pci_set_master(pdev); 1078 device->max_chan = info->max_chan; 1079 device->chan_base = info->ch_base; 1080 device->block_size = info->block_size; 1081 device->pimr_mask = info->pimr_mask; 1082 1083 err = mid_setup_dma(pdev); 1084 if (err) 1085 goto err_dma; 1086 1087 pm_runtime_set_active(&pdev->dev); 1088 pm_runtime_enable(&pdev->dev); 1089 pm_runtime_allow(&pdev->dev); 1090 return 0; 1091 1092err_dma: 1093 iounmap(device->dma_base); 1094err_ioremap: 1095 pci_dev_put(pdev); 1096 kfree(device); 1097err_kzalloc: 1098err_set_dma_mask: 1099 pci_release_regions(pdev); 1100 pci_disable_device(pdev); 1101err_request_regions: 1102err_enable_device: 1103 pr_err("ERR_MDMA:Probe failed %d\n", err); 1104 return err; 1105} 1106 1107/** 1108 * intel_mid_dma_remove - PCI remove 1109 * @pdev: Controller PCI device structure 1110 * 1111 * Free up all resources and data 1112 * Call shutdown_dma to complete contoller and chan cleanup 1113 */ 1114static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) 1115{ 1116 struct middma_device *device = pci_get_drvdata(pdev); 1117 middma_shutdown(pdev); 1118 pci_dev_put(pdev); 1119 kfree(device); 1120 pci_release_regions(pdev); 1121 pci_disable_device(pdev); 1122} 1123 1124/* Power Management */ 1125/* 1126* dma_suspend - PCI suspend function 1127* 1128* @pci: PCI device structure 1129* @state: PM message 1130* 1131* This function is called by OS when a power event occurs 1132*/ 1133int dma_suspend(struct pci_dev *pci, pm_message_t state) 1134{ 1135 int i; 1136 struct middma_device *device = pci_get_drvdata(pci); 1137 pr_debug("MDMA: dma_suspend called\n"); 1138 1139 for (i = 0; i < device->max_chan; i++) { 1140 if (device->ch[i].in_use) 1141 return -EAGAIN; 1142 } 1143 device->state = SUSPENDED; 1144 pci_set_drvdata(pci, device); 1145 pci_save_state(pci); 1146 pci_disable_device(pci); 1147 pci_set_power_state(pci, PCI_D3hot); 1148 return 0; 1149} 1150 1151/** 1152* dma_resume - PCI resume function 1153* 1154* @pci: PCI device structure 1155* 1156* This function is called by OS when a power event occurs 1157*/ 1158int dma_resume(struct pci_dev *pci) 1159{ 1160 int ret; 1161 struct middma_device *device = pci_get_drvdata(pci); 1162 1163 pr_debug("MDMA: dma_resume called\n"); 1164 pci_set_power_state(pci, PCI_D0); 1165 pci_restore_state(pci); 1166 ret = pci_enable_device(pci); 1167 if (ret) { 1168 pr_err("MDMA: device cant be enabled for %x\n", pci->device); 1169 return ret; 1170 } 1171 device->state = RUNNING; 1172 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1173 pci_set_drvdata(pci, device); 1174 return 0; 1175} 1176 1177static int dma_runtime_suspend(struct device *dev) 1178{ 1179 struct pci_dev *pci_dev = to_pci_dev(dev); 1180 return dma_suspend(pci_dev, PMSG_SUSPEND); 1181} 1182 1183static int dma_runtime_resume(struct device *dev) 1184{ 1185 struct pci_dev *pci_dev = to_pci_dev(dev); 1186 return dma_resume(pci_dev); 1187} 1188 1189static int dma_runtime_idle(struct device *dev) 1190{ 1191 struct pci_dev *pdev = to_pci_dev(dev); 1192 struct middma_device *device = pci_get_drvdata(pdev); 1193 int i; 1194 1195 for (i = 0; i < device->max_chan; i++) { 1196 if (device->ch[i].in_use) 1197 return -EAGAIN; 1198 } 1199 1200 return pm_schedule_suspend(dev, 0); 1201} 1202 1203/****************************************************************************** 1204* PCI stuff 1205*/ 1206static struct pci_device_id intel_mid_dma_ids[] = { 1207 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, 1208 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1209 { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1210 { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, 1211 { 0, } 1212}; 1213MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); 1214 1215static const struct dev_pm_ops intel_mid_dma_pm = { 1216 .runtime_suspend = dma_runtime_suspend, 1217 .runtime_resume = dma_runtime_resume, 1218 .runtime_idle = dma_runtime_idle, 1219}; 1220 1221static struct pci_driver intel_mid_dma_pci = { 1222 .name = "Intel MID DMA", 1223 .id_table = intel_mid_dma_ids, 1224 .probe = intel_mid_dma_probe, 1225 .remove = __devexit_p(intel_mid_dma_remove), 1226#ifdef CONFIG_PM 1227 .suspend = dma_suspend, 1228 .resume = dma_resume, 1229 .driver = { 1230 .pm = &intel_mid_dma_pm, 1231 }, 1232#endif 1233}; 1234 1235static int __init intel_mid_dma_init(void) 1236{ 1237 pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", 1238 INTEL_MID_DMA_DRIVER_VERSION); 1239 return pci_register_driver(&intel_mid_dma_pci); 1240} 1241fs_initcall(intel_mid_dma_init); 1242 1243static void __exit intel_mid_dma_exit(void) 1244{ 1245 pci_unregister_driver(&intel_mid_dma_pci); 1246} 1247module_exit(intel_mid_dma_exit); 1248 1249MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); 1250MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); 1251MODULE_LICENSE("GPL v2"); 1252MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION); 1253