intel_mid_dma.c revision 1fded07513ea57b5ee128958ff119e05588b7227
1/* 2 * intel_mid_dma.c - Intel Langwell DMA Drivers 3 * 4 * Copyright (C) 2008-10 Intel Corp 5 * Author: Vinod Koul <vinod.koul@intel.com> 6 * The driver design is based on dw_dmac driver 7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; version 2 of the License. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License along 19 * with this program; if not, write to the Free Software Foundation, Inc., 20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 21 * 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * 24 * 25 */ 26#include <linux/pci.h> 27#include <linux/interrupt.h> 28#include <linux/pm_runtime.h> 29#include <linux/intel_mid_dma.h> 30#include <linux/module.h> 31 32#define MAX_CHAN 4 /*max ch across controllers*/ 33#include "intel_mid_dma_regs.h" 34 35#define INTEL_MID_DMAC1_ID 0x0814 36#define INTEL_MID_DMAC2_ID 0x0813 37#define INTEL_MID_GP_DMAC2_ID 0x0827 38#define INTEL_MFLD_DMAC1_ID 0x0830 39#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 40#define LNW_PERIPHRAL_MASK_SIZE 0x10 41#define LNW_PERIPHRAL_STATUS 0x0 42#define LNW_PERIPHRAL_MASK 0x8 43 44struct intel_mid_dma_probe_info { 45 u8 max_chan; 46 u8 ch_base; 47 u16 block_size; 48 u32 pimr_mask; 49}; 50 51#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ 52 ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ 53 .max_chan = (_max_chan), \ 54 .ch_base = (_ch_base), \ 55 .block_size = (_block_size), \ 56 .pimr_mask = (_pimr_mask), \ 57 }) 58 59/***************************************************************************** 60Utility Functions*/ 61/** 62 * get_ch_index - convert status to channel 63 * @status: status mask 64 * @base: dma ch base value 65 * 66 * Modify the status mask and return the channel index needing 67 * attention (or -1 if neither) 68 */ 69static int get_ch_index(int *status, unsigned int base) 70{ 71 int i; 72 for (i = 0; i < MAX_CHAN; i++) { 73 if (*status & (1 << (i + base))) { 74 *status = *status & ~(1 << (i + base)); 75 pr_debug("MDMA: index %d New status %x\n", i, *status); 76 return i; 77 } 78 } 79 return -1; 80} 81 82/** 83 * get_block_ts - calculates dma transaction length 84 * @len: dma transfer length 85 * @tx_width: dma transfer src width 86 * @block_size: dma controller max block size 87 * 88 * Based on src width calculate the DMA trsaction length in data items 89 * return data items or FFFF if exceeds max length for block 90 */ 91static int get_block_ts(int len, int tx_width, int block_size) 92{ 93 int byte_width = 0, block_ts = 0; 94 95 switch (tx_width) { 96 case DMA_SLAVE_BUSWIDTH_1_BYTE: 97 byte_width = 1; 98 break; 99 case DMA_SLAVE_BUSWIDTH_2_BYTES: 100 byte_width = 2; 101 break; 102 case DMA_SLAVE_BUSWIDTH_4_BYTES: 103 default: 104 byte_width = 4; 105 break; 106 } 107 108 block_ts = len/byte_width; 109 if (block_ts > block_size) 110 block_ts = 0xFFFF; 111 return block_ts; 112} 113 114/***************************************************************************** 115DMAC1 interrupt Functions*/ 116 117/** 118 * dmac1_mask_periphral_intr - mask the periphral interrupt 119 * @mid: dma device for which masking is required 120 * 121 * Masks the DMA periphral interrupt 122 * this is valid for DMAC1 family controllers only 123 * This controller should have periphral mask registers already mapped 124 */ 125static void dmac1_mask_periphral_intr(struct middma_device *mid) 126{ 127 u32 pimr; 128 129 if (mid->pimr_mask) { 130 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 131 pimr |= mid->pimr_mask; 132 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 133 } 134 return; 135} 136 137/** 138 * dmac1_unmask_periphral_intr - unmask the periphral interrupt 139 * @midc: dma channel for which masking is required 140 * 141 * UnMasks the DMA periphral interrupt, 142 * this is valid for DMAC1 family controllers only 143 * This controller should have periphral mask registers already mapped 144 */ 145static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) 146{ 147 u32 pimr; 148 struct middma_device *mid = to_middma_device(midc->chan.device); 149 150 if (mid->pimr_mask) { 151 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 152 pimr &= ~mid->pimr_mask; 153 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 154 } 155 return; 156} 157 158/** 159 * enable_dma_interrupt - enable the periphral interrupt 160 * @midc: dma channel for which enable interrupt is required 161 * 162 * Enable the DMA periphral interrupt, 163 * this is valid for DMAC1 family controllers only 164 * This controller should have periphral mask registers already mapped 165 */ 166static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) 167{ 168 dmac1_unmask_periphral_intr(midc); 169 170 /*en ch interrupts*/ 171 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 172 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 173 return; 174} 175 176/** 177 * disable_dma_interrupt - disable the periphral interrupt 178 * @midc: dma channel for which disable interrupt is required 179 * 180 * Disable the DMA periphral interrupt, 181 * this is valid for DMAC1 family controllers only 182 * This controller should have periphral mask registers already mapped 183 */ 184static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 185{ 186 /*Check LPE PISR, make sure fwd is disabled*/ 187 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 190 return; 191} 192 193/***************************************************************************** 194DMA channel helper Functions*/ 195/** 196 * mid_desc_get - get a descriptor 197 * @midc: dma channel for which descriptor is required 198 * 199 * Obtain a descriptor for the channel. Returns NULL if none are free. 200 * Once the descriptor is returned it is private until put on another 201 * list or freed 202 */ 203static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) 204{ 205 struct intel_mid_dma_desc *desc, *_desc; 206 struct intel_mid_dma_desc *ret = NULL; 207 208 spin_lock_bh(&midc->lock); 209 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 210 if (async_tx_test_ack(&desc->txd)) { 211 list_del(&desc->desc_node); 212 ret = desc; 213 break; 214 } 215 } 216 spin_unlock_bh(&midc->lock); 217 return ret; 218} 219 220/** 221 * mid_desc_put - put a descriptor 222 * @midc: dma channel for which descriptor is required 223 * @desc: descriptor to put 224 * 225 * Return a descriptor from lwn_desc_get back to the free pool 226 */ 227static void midc_desc_put(struct intel_mid_dma_chan *midc, 228 struct intel_mid_dma_desc *desc) 229{ 230 if (desc) { 231 spin_lock_bh(&midc->lock); 232 list_add_tail(&desc->desc_node, &midc->free_list); 233 spin_unlock_bh(&midc->lock); 234 } 235} 236/** 237 * midc_dostart - begin a DMA transaction 238 * @midc: channel for which txn is to be started 239 * @first: first descriptor of series 240 * 241 * Load a transaction into the engine. This must be called with midc->lock 242 * held and bh disabled. 243 */ 244static void midc_dostart(struct intel_mid_dma_chan *midc, 245 struct intel_mid_dma_desc *first) 246{ 247 struct middma_device *mid = to_middma_device(midc->chan.device); 248 249 /* channel is idle */ 250 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { 251 /*error*/ 252 pr_err("ERR_MDMA: channel is busy in start\n"); 253 /* The tasklet will hopefully advance the queue... */ 254 return; 255 } 256 midc->busy = true; 257 /*write registers and en*/ 258 iowrite32(first->sar, midc->ch_regs + SAR); 259 iowrite32(first->dar, midc->ch_regs + DAR); 260 iowrite32(first->lli_phys, midc->ch_regs + LLP); 261 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 262 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 263 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); 264 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); 265 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 266 (int)first->sar, (int)first->dar, first->cfg_hi, 267 first->cfg_lo, first->ctl_hi, first->ctl_lo); 268 first->status = DMA_IN_PROGRESS; 269 270 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 271} 272 273/** 274 * midc_descriptor_complete - process completed descriptor 275 * @midc: channel owning the descriptor 276 * @desc: the descriptor itself 277 * 278 * Process a completed descriptor and perform any callbacks upon 279 * the completion. The completion handling drops the lock during the 280 * callbacks but must be called with the lock held. 281 */ 282static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, 283 struct intel_mid_dma_desc *desc) 284 __releases(&midc->lock) __acquires(&midc->lock) 285{ 286 struct dma_async_tx_descriptor *txd = &desc->txd; 287 dma_async_tx_callback callback_txd = NULL; 288 struct intel_mid_dma_lli *llitem; 289 void *param_txd = NULL; 290 291 midc->completed = txd->cookie; 292 callback_txd = txd->callback; 293 param_txd = txd->callback_param; 294 295 if (desc->lli != NULL) { 296 /*clear the DONE bit of completed LLI in memory*/ 297 llitem = desc->lli + desc->current_lli; 298 llitem->ctl_hi &= CLEAR_DONE; 299 if (desc->current_lli < desc->lli_length-1) 300 (desc->current_lli)++; 301 else 302 desc->current_lli = 0; 303 } 304 spin_unlock_bh(&midc->lock); 305 if (callback_txd) { 306 pr_debug("MDMA: TXD callback set ... calling\n"); 307 callback_txd(param_txd); 308 } 309 if (midc->raw_tfr) { 310 desc->status = DMA_SUCCESS; 311 if (desc->lli != NULL) { 312 pci_pool_free(desc->lli_pool, desc->lli, 313 desc->lli_phys); 314 pci_pool_destroy(desc->lli_pool); 315 desc->lli = NULL; 316 } 317 list_move(&desc->desc_node, &midc->free_list); 318 midc->busy = false; 319 } 320 spin_lock_bh(&midc->lock); 321 322} 323/** 324 * midc_scan_descriptors - check the descriptors in channel 325 * mark completed when tx is completete 326 * @mid: device 327 * @midc: channel to scan 328 * 329 * Walk the descriptor chain for the device and process any entries 330 * that are complete. 331 */ 332static void midc_scan_descriptors(struct middma_device *mid, 333 struct intel_mid_dma_chan *midc) 334{ 335 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; 336 337 /*tx is complete*/ 338 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 339 if (desc->status == DMA_IN_PROGRESS) 340 midc_descriptor_complete(midc, desc); 341 } 342 return; 343 } 344/** 345 * midc_lli_fill_sg - Helper function to convert 346 * SG list to Linked List Items. 347 *@midc: Channel 348 *@desc: DMA descriptor 349 *@sglist: Pointer to SG list 350 *@sglen: SG list length 351 *@flags: DMA transaction flags 352 * 353 * Walk through the SG list and convert the SG list into Linked 354 * List Items (LLI). 355 */ 356static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, 357 struct intel_mid_dma_desc *desc, 358 struct scatterlist *sglist, 359 unsigned int sglen, 360 unsigned int flags) 361{ 362 struct intel_mid_dma_slave *mids; 363 struct scatterlist *sg; 364 dma_addr_t lli_next, sg_phy_addr; 365 struct intel_mid_dma_lli *lli_bloc_desc; 366 union intel_mid_dma_ctl_lo ctl_lo; 367 union intel_mid_dma_ctl_hi ctl_hi; 368 int i; 369 370 pr_debug("MDMA: Entered midc_lli_fill_sg\n"); 371 mids = midc->mid_slave; 372 373 lli_bloc_desc = desc->lli; 374 lli_next = desc->lli_phys; 375 376 ctl_lo.ctl_lo = desc->ctl_lo; 377 ctl_hi.ctl_hi = desc->ctl_hi; 378 for_each_sg(sglist, sg, sglen, i) { 379 /*Populate CTL_LOW and LLI values*/ 380 if (i != sglen - 1) { 381 lli_next = lli_next + 382 sizeof(struct intel_mid_dma_lli); 383 } else { 384 /*Check for circular list, otherwise terminate LLI to ZERO*/ 385 if (flags & DMA_PREP_CIRCULAR_LIST) { 386 pr_debug("MDMA: LLI is configured in circular mode\n"); 387 lli_next = desc->lli_phys; 388 } else { 389 lli_next = 0; 390 ctl_lo.ctlx.llp_dst_en = 0; 391 ctl_lo.ctlx.llp_src_en = 0; 392 } 393 } 394 /*Populate CTL_HI values*/ 395 ctl_hi.ctlx.block_ts = get_block_ts(sg->length, 396 desc->width, 397 midc->dma->block_size); 398 /*Populate SAR and DAR values*/ 399 sg_phy_addr = sg_phys(sg); 400 if (desc->dirn == DMA_MEM_TO_DEV) { 401 lli_bloc_desc->sar = sg_phy_addr; 402 lli_bloc_desc->dar = mids->dma_slave.dst_addr; 403 } else if (desc->dirn == DMA_DEV_TO_MEM) { 404 lli_bloc_desc->sar = mids->dma_slave.src_addr; 405 lli_bloc_desc->dar = sg_phy_addr; 406 } 407 /*Copy values into block descriptor in system memroy*/ 408 lli_bloc_desc->llp = lli_next; 409 lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; 410 lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; 411 412 lli_bloc_desc++; 413 } 414 /*Copy very first LLI values to descriptor*/ 415 desc->ctl_lo = desc->lli->ctl_lo; 416 desc->ctl_hi = desc->lli->ctl_hi; 417 desc->sar = desc->lli->sar; 418 desc->dar = desc->lli->dar; 419 420 return 0; 421} 422/***************************************************************************** 423DMA engine callback Functions*/ 424/** 425 * intel_mid_dma_tx_submit - callback to submit DMA transaction 426 * @tx: dma engine descriptor 427 * 428 * Submit the DMA trasaction for this descriptor, start if ch idle 429 */ 430static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) 431{ 432 struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); 433 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); 434 dma_cookie_t cookie; 435 436 spin_lock_bh(&midc->lock); 437 cookie = midc->chan.cookie; 438 439 if (++cookie < 0) 440 cookie = 1; 441 442 midc->chan.cookie = cookie; 443 desc->txd.cookie = cookie; 444 445 446 if (list_empty(&midc->active_list)) 447 list_add_tail(&desc->desc_node, &midc->active_list); 448 else 449 list_add_tail(&desc->desc_node, &midc->queue); 450 451 midc_dostart(midc, desc); 452 spin_unlock_bh(&midc->lock); 453 454 return cookie; 455} 456 457/** 458 * intel_mid_dma_issue_pending - callback to issue pending txn 459 * @chan: chan where pending trascation needs to be checked and submitted 460 * 461 * Call for scan to issue pending descriptors 462 */ 463static void intel_mid_dma_issue_pending(struct dma_chan *chan) 464{ 465 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 466 467 spin_lock_bh(&midc->lock); 468 if (!list_empty(&midc->queue)) 469 midc_scan_descriptors(to_middma_device(chan->device), midc); 470 spin_unlock_bh(&midc->lock); 471} 472 473/** 474 * intel_mid_dma_tx_status - Return status of txn 475 * @chan: chan for where status needs to be checked 476 * @cookie: cookie for txn 477 * @txstate: DMA txn state 478 * 479 * Return status of DMA txn 480 */ 481static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, 482 dma_cookie_t cookie, 483 struct dma_tx_state *txstate) 484{ 485 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 486 dma_cookie_t last_used; 487 dma_cookie_t last_complete; 488 int ret; 489 490 last_complete = midc->completed; 491 last_used = chan->cookie; 492 493 ret = dma_async_is_complete(cookie, last_complete, last_used); 494 if (ret != DMA_SUCCESS) { 495 spin_lock_bh(&midc->lock); 496 midc_scan_descriptors(to_middma_device(chan->device), midc); 497 spin_unlock_bh(&midc->lock); 498 499 last_complete = midc->completed; 500 last_used = chan->cookie; 501 502 ret = dma_async_is_complete(cookie, last_complete, last_used); 503 } 504 505 if (txstate) { 506 txstate->last = last_complete; 507 txstate->used = last_used; 508 txstate->residue = 0; 509 } 510 return ret; 511} 512 513static int dma_slave_control(struct dma_chan *chan, unsigned long arg) 514{ 515 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 516 struct dma_slave_config *slave = (struct dma_slave_config *)arg; 517 struct intel_mid_dma_slave *mid_slave; 518 519 BUG_ON(!midc); 520 BUG_ON(!slave); 521 pr_debug("MDMA: slave control called\n"); 522 523 mid_slave = to_intel_mid_dma_slave(slave); 524 525 BUG_ON(!mid_slave); 526 527 midc->mid_slave = mid_slave; 528 return 0; 529} 530/** 531 * intel_mid_dma_device_control - DMA device control 532 * @chan: chan for DMA control 533 * @cmd: control cmd 534 * @arg: cmd arg value 535 * 536 * Perform DMA control command 537 */ 538static int intel_mid_dma_device_control(struct dma_chan *chan, 539 enum dma_ctrl_cmd cmd, unsigned long arg) 540{ 541 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 542 struct middma_device *mid = to_middma_device(chan->device); 543 struct intel_mid_dma_desc *desc, *_desc; 544 union intel_mid_dma_cfg_lo cfg_lo; 545 546 if (cmd == DMA_SLAVE_CONFIG) 547 return dma_slave_control(chan, arg); 548 549 if (cmd != DMA_TERMINATE_ALL) 550 return -ENXIO; 551 552 spin_lock_bh(&midc->lock); 553 if (midc->busy == false) { 554 spin_unlock_bh(&midc->lock); 555 return 0; 556 } 557 /*Suspend and disable the channel*/ 558 cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); 559 cfg_lo.cfgx.ch_susp = 1; 560 iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); 561 iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 562 midc->busy = false; 563 /* Disable interrupts */ 564 disable_dma_interrupt(midc); 565 midc->descs_allocated = 0; 566 567 spin_unlock_bh(&midc->lock); 568 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 569 if (desc->lli != NULL) { 570 pci_pool_free(desc->lli_pool, desc->lli, 571 desc->lli_phys); 572 pci_pool_destroy(desc->lli_pool); 573 desc->lli = NULL; 574 } 575 list_move(&desc->desc_node, &midc->free_list); 576 } 577 return 0; 578} 579 580 581/** 582 * intel_mid_dma_prep_memcpy - Prep memcpy txn 583 * @chan: chan for DMA transfer 584 * @dest: destn address 585 * @src: src address 586 * @len: DMA transfer len 587 * @flags: DMA flags 588 * 589 * Perform a DMA memcpy. Note we support slave periphral DMA transfers only 590 * The periphral txn details should be filled in slave structure properly 591 * Returns the descriptor for this txn 592 */ 593static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( 594 struct dma_chan *chan, dma_addr_t dest, 595 dma_addr_t src, size_t len, unsigned long flags) 596{ 597 struct intel_mid_dma_chan *midc; 598 struct intel_mid_dma_desc *desc = NULL; 599 struct intel_mid_dma_slave *mids; 600 union intel_mid_dma_ctl_lo ctl_lo; 601 union intel_mid_dma_ctl_hi ctl_hi; 602 union intel_mid_dma_cfg_lo cfg_lo; 603 union intel_mid_dma_cfg_hi cfg_hi; 604 enum dma_slave_buswidth width; 605 606 pr_debug("MDMA: Prep for memcpy\n"); 607 BUG_ON(!chan); 608 if (!len) 609 return NULL; 610 611 midc = to_intel_mid_dma_chan(chan); 612 BUG_ON(!midc); 613 614 mids = midc->mid_slave; 615 BUG_ON(!mids); 616 617 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 618 midc->dma->pci_id, midc->ch_id, len); 619 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 620 mids->cfg_mode, mids->dma_slave.direction, 621 mids->hs_mode, mids->dma_slave.src_addr_width); 622 623 /*calculate CFG_LO*/ 624 if (mids->hs_mode == LNW_DMA_SW_HS) { 625 cfg_lo.cfg_lo = 0; 626 cfg_lo.cfgx.hs_sel_dst = 1; 627 cfg_lo.cfgx.hs_sel_src = 1; 628 } else if (mids->hs_mode == LNW_DMA_HW_HS) 629 cfg_lo.cfg_lo = 0x00000; 630 631 /*calculate CFG_HI*/ 632 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 633 /*SW HS only*/ 634 cfg_hi.cfg_hi = 0; 635 } else { 636 cfg_hi.cfg_hi = 0; 637 if (midc->dma->pimr_mask) { 638 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 639 cfg_hi.cfgx.fifo_mode = 1; 640 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { 641 cfg_hi.cfgx.src_per = 0; 642 if (mids->device_instance == 0) 643 cfg_hi.cfgx.dst_per = 3; 644 if (mids->device_instance == 1) 645 cfg_hi.cfgx.dst_per = 1; 646 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { 647 if (mids->device_instance == 0) 648 cfg_hi.cfgx.src_per = 2; 649 if (mids->device_instance == 1) 650 cfg_hi.cfgx.src_per = 0; 651 cfg_hi.cfgx.dst_per = 0; 652 } 653 } else { 654 cfg_hi.cfgx.protctl = 0x1; /*default value*/ 655 cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = 656 midc->ch_id - midc->dma->chan_base; 657 } 658 } 659 660 /*calculate CTL_HI*/ 661 ctl_hi.ctlx.reser = 0; 662 ctl_hi.ctlx.done = 0; 663 width = mids->dma_slave.src_addr_width; 664 665 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 666 pr_debug("MDMA:calc len %d for block size %d\n", 667 ctl_hi.ctlx.block_ts, midc->dma->block_size); 668 /*calculate CTL_LO*/ 669 ctl_lo.ctl_lo = 0; 670 ctl_lo.ctlx.int_en = 1; 671 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; 672 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; 673 674 /* 675 * Here we need some translation from "enum dma_slave_buswidth" 676 * to the format for our dma controller 677 * standard intel_mid_dmac's format 678 * 1 Byte 0b000 679 * 2 Bytes 0b001 680 * 4 Bytes 0b010 681 */ 682 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; 683 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; 684 685 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 686 ctl_lo.ctlx.tt_fc = 0; 687 ctl_lo.ctlx.sinc = 0; 688 ctl_lo.ctlx.dinc = 0; 689 } else { 690 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { 691 ctl_lo.ctlx.sinc = 0; 692 ctl_lo.ctlx.dinc = 2; 693 ctl_lo.ctlx.tt_fc = 1; 694 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { 695 ctl_lo.ctlx.sinc = 2; 696 ctl_lo.ctlx.dinc = 0; 697 ctl_lo.ctlx.tt_fc = 2; 698 } 699 } 700 701 pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", 702 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); 703 704 enable_dma_interrupt(midc); 705 706 desc = midc_desc_get(midc); 707 if (desc == NULL) 708 goto err_desc_get; 709 desc->sar = src; 710 desc->dar = dest ; 711 desc->len = len; 712 desc->cfg_hi = cfg_hi.cfg_hi; 713 desc->cfg_lo = cfg_lo.cfg_lo; 714 desc->ctl_lo = ctl_lo.ctl_lo; 715 desc->ctl_hi = ctl_hi.ctl_hi; 716 desc->width = width; 717 desc->dirn = mids->dma_slave.direction; 718 desc->lli_phys = 0; 719 desc->lli = NULL; 720 desc->lli_pool = NULL; 721 return &desc->txd; 722 723err_desc_get: 724 pr_err("ERR_MDMA: Failed to get desc\n"); 725 midc_desc_put(midc, desc); 726 return NULL; 727} 728/** 729 * intel_mid_dma_prep_slave_sg - Prep slave sg txn 730 * @chan: chan for DMA transfer 731 * @sgl: scatter gather list 732 * @sg_len: length of sg txn 733 * @direction: DMA transfer dirtn 734 * @flags: DMA flags 735 * 736 * Prepares LLI based periphral transfer 737 */ 738static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 739 struct dma_chan *chan, struct scatterlist *sgl, 740 unsigned int sg_len, enum dma_transfer_direction direction, 741 unsigned long flags) 742{ 743 struct intel_mid_dma_chan *midc = NULL; 744 struct intel_mid_dma_slave *mids = NULL; 745 struct intel_mid_dma_desc *desc = NULL; 746 struct dma_async_tx_descriptor *txd = NULL; 747 union intel_mid_dma_ctl_lo ctl_lo; 748 749 pr_debug("MDMA: Prep for slave SG\n"); 750 751 if (!sg_len) { 752 pr_err("MDMA: Invalid SG length\n"); 753 return NULL; 754 } 755 midc = to_intel_mid_dma_chan(chan); 756 BUG_ON(!midc); 757 758 mids = midc->mid_slave; 759 BUG_ON(!mids); 760 761 if (!midc->dma->pimr_mask) { 762 /* We can still handle sg list with only one item */ 763 if (sg_len == 1) { 764 txd = intel_mid_dma_prep_memcpy(chan, 765 mids->dma_slave.dst_addr, 766 mids->dma_slave.src_addr, 767 sgl->length, 768 flags); 769 return txd; 770 } else { 771 pr_warn("MDMA: SG list is not supported by this controller\n"); 772 return NULL; 773 } 774 } 775 776 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 777 sg_len, direction, flags); 778 779 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); 780 if (NULL == txd) { 781 pr_err("MDMA: Prep memcpy failed\n"); 782 return NULL; 783 } 784 785 desc = to_intel_mid_dma_desc(txd); 786 desc->dirn = direction; 787 ctl_lo.ctl_lo = desc->ctl_lo; 788 ctl_lo.ctlx.llp_dst_en = 1; 789 ctl_lo.ctlx.llp_src_en = 1; 790 desc->ctl_lo = ctl_lo.ctl_lo; 791 desc->lli_length = sg_len; 792 desc->current_lli = 0; 793 /* DMA coherent memory pool for LLI descriptors*/ 794 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 795 midc->dma->pdev, 796 (sizeof(struct intel_mid_dma_lli)*sg_len), 797 32, 0); 798 if (NULL == desc->lli_pool) { 799 pr_err("MID_DMA:LLI pool create failed\n"); 800 return NULL; 801 } 802 803 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 804 if (!desc->lli) { 805 pr_err("MID_DMA: LLI alloc failed\n"); 806 pci_pool_destroy(desc->lli_pool); 807 return NULL; 808 } 809 810 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 811 if (flags & DMA_PREP_INTERRUPT) { 812 iowrite32(UNMASK_INTR_REG(midc->ch_id), 813 midc->dma_base + MASK_BLOCK); 814 pr_debug("MDMA:Enabled Block interrupt\n"); 815 } 816 return &desc->txd; 817} 818 819/** 820 * intel_mid_dma_free_chan_resources - Frees dma resources 821 * @chan: chan requiring attention 822 * 823 * Frees the allocated resources on this DMA chan 824 */ 825static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) 826{ 827 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 828 struct middma_device *mid = to_middma_device(chan->device); 829 struct intel_mid_dma_desc *desc, *_desc; 830 831 if (true == midc->busy) { 832 /*trying to free ch in use!!!!!*/ 833 pr_err("ERR_MDMA: trying to free ch in use\n"); 834 } 835 pm_runtime_put(&mid->pdev->dev); 836 spin_lock_bh(&midc->lock); 837 midc->descs_allocated = 0; 838 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 839 list_del(&desc->desc_node); 840 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 841 } 842 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 843 list_del(&desc->desc_node); 844 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 845 } 846 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { 847 list_del(&desc->desc_node); 848 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 849 } 850 spin_unlock_bh(&midc->lock); 851 midc->in_use = false; 852 midc->busy = false; 853 /* Disable CH interrupts */ 854 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 855 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); 856} 857 858/** 859 * intel_mid_dma_alloc_chan_resources - Allocate dma resources 860 * @chan: chan requiring attention 861 * 862 * Allocates DMA resources on this chan 863 * Return the descriptors allocated 864 */ 865static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) 866{ 867 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 868 struct middma_device *mid = to_middma_device(chan->device); 869 struct intel_mid_dma_desc *desc; 870 dma_addr_t phys; 871 int i = 0; 872 873 pm_runtime_get_sync(&mid->pdev->dev); 874 875 if (mid->state == SUSPENDED) { 876 if (dma_resume(mid->pdev)) { 877 pr_err("ERR_MDMA: resume failed"); 878 return -EFAULT; 879 } 880 } 881 882 /* ASSERT: channel is idle */ 883 if (test_ch_en(mid->dma_base, midc->ch_id)) { 884 /*ch is not idle*/ 885 pr_err("ERR_MDMA: ch not idle\n"); 886 pm_runtime_put(&mid->pdev->dev); 887 return -EIO; 888 } 889 midc->completed = chan->cookie = 1; 890 891 spin_lock_bh(&midc->lock); 892 while (midc->descs_allocated < DESCS_PER_CHANNEL) { 893 spin_unlock_bh(&midc->lock); 894 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 895 if (!desc) { 896 pr_err("ERR_MDMA: desc failed\n"); 897 pm_runtime_put(&mid->pdev->dev); 898 return -ENOMEM; 899 /*check*/ 900 } 901 dma_async_tx_descriptor_init(&desc->txd, chan); 902 desc->txd.tx_submit = intel_mid_dma_tx_submit; 903 desc->txd.flags = DMA_CTRL_ACK; 904 desc->txd.phys = phys; 905 spin_lock_bh(&midc->lock); 906 i = ++midc->descs_allocated; 907 list_add_tail(&desc->desc_node, &midc->free_list); 908 } 909 spin_unlock_bh(&midc->lock); 910 midc->in_use = true; 911 midc->busy = false; 912 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 913 return i; 914} 915 916/** 917 * midc_handle_error - Handle DMA txn error 918 * @mid: controller where error occurred 919 * @midc: chan where error occurred 920 * 921 * Scan the descriptor for error 922 */ 923static void midc_handle_error(struct middma_device *mid, 924 struct intel_mid_dma_chan *midc) 925{ 926 midc_scan_descriptors(mid, midc); 927} 928 929/** 930 * dma_tasklet - DMA interrupt tasklet 931 * @data: tasklet arg (the controller structure) 932 * 933 * Scan the controller for interrupts for completion/error 934 * Clear the interrupt and call for handling completion/error 935 */ 936static void dma_tasklet(unsigned long data) 937{ 938 struct middma_device *mid = NULL; 939 struct intel_mid_dma_chan *midc = NULL; 940 u32 status, raw_tfr, raw_block; 941 int i; 942 943 mid = (struct middma_device *)data; 944 if (mid == NULL) { 945 pr_err("ERR_MDMA: tasklet Null param\n"); 946 return; 947 } 948 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 949 raw_tfr = ioread32(mid->dma_base + RAW_TFR); 950 raw_block = ioread32(mid->dma_base + RAW_BLOCK); 951 status = raw_tfr | raw_block; 952 status &= mid->intr_mask; 953 while (status) { 954 /*txn interrupt*/ 955 i = get_ch_index(&status, mid->chan_base); 956 if (i < 0) { 957 pr_err("ERR_MDMA:Invalid ch index %x\n", i); 958 return; 959 } 960 midc = &mid->ch[i]; 961 if (midc == NULL) { 962 pr_err("ERR_MDMA:Null param midc\n"); 963 return; 964 } 965 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 966 status, midc->ch_id, i); 967 midc->raw_tfr = raw_tfr; 968 midc->raw_block = raw_block; 969 spin_lock_bh(&midc->lock); 970 /*clearing this interrupts first*/ 971 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 972 if (raw_block) { 973 iowrite32((1 << midc->ch_id), 974 mid->dma_base + CLEAR_BLOCK); 975 } 976 midc_scan_descriptors(mid, midc); 977 pr_debug("MDMA:Scan of desc... complete, unmasking\n"); 978 iowrite32(UNMASK_INTR_REG(midc->ch_id), 979 mid->dma_base + MASK_TFR); 980 if (raw_block) { 981 iowrite32(UNMASK_INTR_REG(midc->ch_id), 982 mid->dma_base + MASK_BLOCK); 983 } 984 spin_unlock_bh(&midc->lock); 985 } 986 987 status = ioread32(mid->dma_base + RAW_ERR); 988 status &= mid->intr_mask; 989 while (status) { 990 /*err interrupt*/ 991 i = get_ch_index(&status, mid->chan_base); 992 if (i < 0) { 993 pr_err("ERR_MDMA:Invalid ch index %x\n", i); 994 return; 995 } 996 midc = &mid->ch[i]; 997 if (midc == NULL) { 998 pr_err("ERR_MDMA:Null param midc\n"); 999 return; 1000 } 1001 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 1002 status, midc->ch_id, i); 1003 1004 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); 1005 spin_lock_bh(&midc->lock); 1006 midc_handle_error(mid, midc); 1007 iowrite32(UNMASK_INTR_REG(midc->ch_id), 1008 mid->dma_base + MASK_ERR); 1009 spin_unlock_bh(&midc->lock); 1010 } 1011 pr_debug("MDMA:Exiting takslet...\n"); 1012 return; 1013} 1014 1015static void dma_tasklet1(unsigned long data) 1016{ 1017 pr_debug("MDMA:in takslet1...\n"); 1018 return dma_tasklet(data); 1019} 1020 1021static void dma_tasklet2(unsigned long data) 1022{ 1023 pr_debug("MDMA:in takslet2...\n"); 1024 return dma_tasklet(data); 1025} 1026 1027/** 1028 * intel_mid_dma_interrupt - DMA ISR 1029 * @irq: IRQ where interrupt occurred 1030 * @data: ISR cllback data (the controller structure) 1031 * 1032 * See if this is our interrupt if so then schedule the tasklet 1033 * otherwise ignore 1034 */ 1035static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 1036{ 1037 struct middma_device *mid = data; 1038 u32 tfr_status, err_status; 1039 int call_tasklet = 0; 1040 1041 tfr_status = ioread32(mid->dma_base + RAW_TFR); 1042 err_status = ioread32(mid->dma_base + RAW_ERR); 1043 if (!tfr_status && !err_status) 1044 return IRQ_NONE; 1045 1046 /*DMA Interrupt*/ 1047 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 1048 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 1049 tfr_status &= mid->intr_mask; 1050 if (tfr_status) { 1051 /*need to disable intr*/ 1052 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); 1053 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); 1054 pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 1055 call_tasklet = 1; 1056 } 1057 err_status &= mid->intr_mask; 1058 if (err_status) { 1059 iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); 1060 call_tasklet = 1; 1061 } 1062 if (call_tasklet) 1063 tasklet_schedule(&mid->tasklet); 1064 1065 return IRQ_HANDLED; 1066} 1067 1068static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) 1069{ 1070 return intel_mid_dma_interrupt(irq, data); 1071} 1072 1073static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) 1074{ 1075 return intel_mid_dma_interrupt(irq, data); 1076} 1077 1078/** 1079 * mid_setup_dma - Setup the DMA controller 1080 * @pdev: Controller PCI device structure 1081 * 1082 * Initialize the DMA controller, channels, registers with DMA engine, 1083 * ISR. Initialize DMA controller channels. 1084 */ 1085static int mid_setup_dma(struct pci_dev *pdev) 1086{ 1087 struct middma_device *dma = pci_get_drvdata(pdev); 1088 int err, i; 1089 1090 /* DMA coherent memory pool for DMA descriptor allocations */ 1091 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, 1092 sizeof(struct intel_mid_dma_desc), 1093 32, 0); 1094 if (NULL == dma->dma_pool) { 1095 pr_err("ERR_MDMA:pci_pool_create failed\n"); 1096 err = -ENOMEM; 1097 goto err_dma_pool; 1098 } 1099 1100 INIT_LIST_HEAD(&dma->common.channels); 1101 dma->pci_id = pdev->device; 1102 if (dma->pimr_mask) { 1103 dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, 1104 LNW_PERIPHRAL_MASK_SIZE); 1105 if (dma->mask_reg == NULL) { 1106 pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); 1107 return -ENOMEM; 1108 } 1109 } else 1110 dma->mask_reg = NULL; 1111 1112 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 1113 /*init CH structures*/ 1114 dma->intr_mask = 0; 1115 dma->state = RUNNING; 1116 for (i = 0; i < dma->max_chan; i++) { 1117 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1118 1119 midch->chan.device = &dma->common; 1120 midch->chan.cookie = 1; 1121 midch->ch_id = dma->chan_base + i; 1122 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1123 1124 midch->dma_base = dma->dma_base; 1125 midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; 1126 midch->dma = dma; 1127 dma->intr_mask |= 1 << (dma->chan_base + i); 1128 spin_lock_init(&midch->lock); 1129 1130 INIT_LIST_HEAD(&midch->active_list); 1131 INIT_LIST_HEAD(&midch->queue); 1132 INIT_LIST_HEAD(&midch->free_list); 1133 /*mask interrupts*/ 1134 iowrite32(MASK_INTR_REG(midch->ch_id), 1135 dma->dma_base + MASK_BLOCK); 1136 iowrite32(MASK_INTR_REG(midch->ch_id), 1137 dma->dma_base + MASK_SRC_TRAN); 1138 iowrite32(MASK_INTR_REG(midch->ch_id), 1139 dma->dma_base + MASK_DST_TRAN); 1140 iowrite32(MASK_INTR_REG(midch->ch_id), 1141 dma->dma_base + MASK_ERR); 1142 iowrite32(MASK_INTR_REG(midch->ch_id), 1143 dma->dma_base + MASK_TFR); 1144 1145 disable_dma_interrupt(midch); 1146 list_add_tail(&midch->chan.device_node, &dma->common.channels); 1147 } 1148 pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); 1149 1150 /*init dma structure*/ 1151 dma_cap_zero(dma->common.cap_mask); 1152 dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); 1153 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1154 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1155 dma->common.dev = &pdev->dev; 1156 1157 dma->common.device_alloc_chan_resources = 1158 intel_mid_dma_alloc_chan_resources; 1159 dma->common.device_free_chan_resources = 1160 intel_mid_dma_free_chan_resources; 1161 1162 dma->common.device_tx_status = intel_mid_dma_tx_status; 1163 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; 1164 dma->common.device_issue_pending = intel_mid_dma_issue_pending; 1165 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; 1166 dma->common.device_control = intel_mid_dma_device_control; 1167 1168 /*enable dma cntrl*/ 1169 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); 1170 1171 /*register irq */ 1172 if (dma->pimr_mask) { 1173 pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 1174 err = request_irq(pdev->irq, intel_mid_dma_interrupt1, 1175 IRQF_SHARED, "INTEL_MID_DMAC1", dma); 1176 if (0 != err) 1177 goto err_irq; 1178 } else { 1179 dma->intr_mask = 0x03; 1180 pr_debug("MDMA:Requesting irq for DMAC2\n"); 1181 err = request_irq(pdev->irq, intel_mid_dma_interrupt2, 1182 IRQF_SHARED, "INTEL_MID_DMAC2", dma); 1183 if (0 != err) 1184 goto err_irq; 1185 } 1186 /*register device w/ engine*/ 1187 err = dma_async_device_register(&dma->common); 1188 if (0 != err) { 1189 pr_err("ERR_MDMA:device_register failed: %d\n", err); 1190 goto err_engine; 1191 } 1192 if (dma->pimr_mask) { 1193 pr_debug("setting up tasklet1 for DMAC1\n"); 1194 tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); 1195 } else { 1196 pr_debug("setting up tasklet2 for DMAC2\n"); 1197 tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); 1198 } 1199 return 0; 1200 1201err_engine: 1202 free_irq(pdev->irq, dma); 1203err_irq: 1204 pci_pool_destroy(dma->dma_pool); 1205err_dma_pool: 1206 pr_err("ERR_MDMA:setup_dma failed: %d\n", err); 1207 return err; 1208 1209} 1210 1211/** 1212 * middma_shutdown - Shutdown the DMA controller 1213 * @pdev: Controller PCI device structure 1214 * 1215 * Called by remove 1216 * Unregister DMa controller, clear all structures and free interrupt 1217 */ 1218static void middma_shutdown(struct pci_dev *pdev) 1219{ 1220 struct middma_device *device = pci_get_drvdata(pdev); 1221 1222 dma_async_device_unregister(&device->common); 1223 pci_pool_destroy(device->dma_pool); 1224 if (device->mask_reg) 1225 iounmap(device->mask_reg); 1226 if (device->dma_base) 1227 iounmap(device->dma_base); 1228 free_irq(pdev->irq, device); 1229 return; 1230} 1231 1232/** 1233 * intel_mid_dma_probe - PCI Probe 1234 * @pdev: Controller PCI device structure 1235 * @id: pci device id structure 1236 * 1237 * Initialize the PCI device, map BARs, query driver data. 1238 * Call setup_dma to complete contoller and chan initilzation 1239 */ 1240static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, 1241 const struct pci_device_id *id) 1242{ 1243 struct middma_device *device; 1244 u32 base_addr, bar_size; 1245 struct intel_mid_dma_probe_info *info; 1246 int err; 1247 1248 pr_debug("MDMA: probe for %x\n", pdev->device); 1249 info = (void *)id->driver_data; 1250 pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n", 1251 info->max_chan, info->ch_base, 1252 info->block_size, info->pimr_mask); 1253 1254 err = pci_enable_device(pdev); 1255 if (err) 1256 goto err_enable_device; 1257 1258 err = pci_request_regions(pdev, "intel_mid_dmac"); 1259 if (err) 1260 goto err_request_regions; 1261 1262 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1263 if (err) 1264 goto err_set_dma_mask; 1265 1266 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1267 if (err) 1268 goto err_set_dma_mask; 1269 1270 device = kzalloc(sizeof(*device), GFP_KERNEL); 1271 if (!device) { 1272 pr_err("ERR_MDMA:kzalloc failed probe\n"); 1273 err = -ENOMEM; 1274 goto err_kzalloc; 1275 } 1276 device->pdev = pci_dev_get(pdev); 1277 1278 base_addr = pci_resource_start(pdev, 0); 1279 bar_size = pci_resource_len(pdev, 0); 1280 device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); 1281 if (!device->dma_base) { 1282 pr_err("ERR_MDMA:ioremap failed\n"); 1283 err = -ENOMEM; 1284 goto err_ioremap; 1285 } 1286 pci_set_drvdata(pdev, device); 1287 pci_set_master(pdev); 1288 device->max_chan = info->max_chan; 1289 device->chan_base = info->ch_base; 1290 device->block_size = info->block_size; 1291 device->pimr_mask = info->pimr_mask; 1292 1293 err = mid_setup_dma(pdev); 1294 if (err) 1295 goto err_dma; 1296 1297 pm_runtime_put_noidle(&pdev->dev); 1298 pm_runtime_allow(&pdev->dev); 1299 return 0; 1300 1301err_dma: 1302 iounmap(device->dma_base); 1303err_ioremap: 1304 pci_dev_put(pdev); 1305 kfree(device); 1306err_kzalloc: 1307err_set_dma_mask: 1308 pci_release_regions(pdev); 1309 pci_disable_device(pdev); 1310err_request_regions: 1311err_enable_device: 1312 pr_err("ERR_MDMA:Probe failed %d\n", err); 1313 return err; 1314} 1315 1316/** 1317 * intel_mid_dma_remove - PCI remove 1318 * @pdev: Controller PCI device structure 1319 * 1320 * Free up all resources and data 1321 * Call shutdown_dma to complete contoller and chan cleanup 1322 */ 1323static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) 1324{ 1325 struct middma_device *device = pci_get_drvdata(pdev); 1326 1327 pm_runtime_get_noresume(&pdev->dev); 1328 pm_runtime_forbid(&pdev->dev); 1329 middma_shutdown(pdev); 1330 pci_dev_put(pdev); 1331 kfree(device); 1332 pci_release_regions(pdev); 1333 pci_disable_device(pdev); 1334} 1335 1336/* Power Management */ 1337/* 1338* dma_suspend - PCI suspend function 1339* 1340* @pci: PCI device structure 1341* @state: PM message 1342* 1343* This function is called by OS when a power event occurs 1344*/ 1345int dma_suspend(struct pci_dev *pci, pm_message_t state) 1346{ 1347 int i; 1348 struct middma_device *device = pci_get_drvdata(pci); 1349 pr_debug("MDMA: dma_suspend called\n"); 1350 1351 for (i = 0; i < device->max_chan; i++) { 1352 if (device->ch[i].in_use) 1353 return -EAGAIN; 1354 } 1355 dmac1_mask_periphral_intr(device); 1356 device->state = SUSPENDED; 1357 pci_save_state(pci); 1358 pci_disable_device(pci); 1359 pci_set_power_state(pci, PCI_D3hot); 1360 return 0; 1361} 1362 1363/** 1364* dma_resume - PCI resume function 1365* 1366* @pci: PCI device structure 1367* 1368* This function is called by OS when a power event occurs 1369*/ 1370int dma_resume(struct pci_dev *pci) 1371{ 1372 int ret; 1373 struct middma_device *device = pci_get_drvdata(pci); 1374 1375 pr_debug("MDMA: dma_resume called\n"); 1376 pci_set_power_state(pci, PCI_D0); 1377 pci_restore_state(pci); 1378 ret = pci_enable_device(pci); 1379 if (ret) { 1380 pr_err("MDMA: device can't be enabled for %x\n", pci->device); 1381 return ret; 1382 } 1383 device->state = RUNNING; 1384 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1385 return 0; 1386} 1387 1388static int dma_runtime_suspend(struct device *dev) 1389{ 1390 struct pci_dev *pci_dev = to_pci_dev(dev); 1391 struct middma_device *device = pci_get_drvdata(pci_dev); 1392 1393 device->state = SUSPENDED; 1394 return 0; 1395} 1396 1397static int dma_runtime_resume(struct device *dev) 1398{ 1399 struct pci_dev *pci_dev = to_pci_dev(dev); 1400 struct middma_device *device = pci_get_drvdata(pci_dev); 1401 1402 device->state = RUNNING; 1403 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1404 return 0; 1405} 1406 1407static int dma_runtime_idle(struct device *dev) 1408{ 1409 struct pci_dev *pdev = to_pci_dev(dev); 1410 struct middma_device *device = pci_get_drvdata(pdev); 1411 int i; 1412 1413 for (i = 0; i < device->max_chan; i++) { 1414 if (device->ch[i].in_use) 1415 return -EAGAIN; 1416 } 1417 1418 return pm_schedule_suspend(dev, 0); 1419} 1420 1421/****************************************************************************** 1422* PCI stuff 1423*/ 1424static struct pci_device_id intel_mid_dma_ids[] = { 1425 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, 1426 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1427 { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1428 { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, 1429 { 0, } 1430}; 1431MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); 1432 1433static const struct dev_pm_ops intel_mid_dma_pm = { 1434 .runtime_suspend = dma_runtime_suspend, 1435 .runtime_resume = dma_runtime_resume, 1436 .runtime_idle = dma_runtime_idle, 1437}; 1438 1439static struct pci_driver intel_mid_dma_pci_driver = { 1440 .name = "Intel MID DMA", 1441 .id_table = intel_mid_dma_ids, 1442 .probe = intel_mid_dma_probe, 1443 .remove = __devexit_p(intel_mid_dma_remove), 1444#ifdef CONFIG_PM 1445 .suspend = dma_suspend, 1446 .resume = dma_resume, 1447 .driver = { 1448 .pm = &intel_mid_dma_pm, 1449 }, 1450#endif 1451}; 1452 1453static int __init intel_mid_dma_init(void) 1454{ 1455 pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", 1456 INTEL_MID_DMA_DRIVER_VERSION); 1457 return pci_register_driver(&intel_mid_dma_pci_driver); 1458} 1459fs_initcall(intel_mid_dma_init); 1460 1461static void __exit intel_mid_dma_exit(void) 1462{ 1463 pci_unregister_driver(&intel_mid_dma_pci_driver); 1464} 1465module_exit(intel_mid_dma_exit); 1466 1467MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); 1468MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); 1469MODULE_LICENSE("GPL v2"); 1470MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION); 1471