intel_mid_dma.c revision d2ebfb335b0426deb1a4fb14e4e926d81ecd8235
/*
 *  intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  The driver design is based on dw_dmac driver
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#include <linux/module.h>

#include "dmaengine.h"

#define MAX_CHAN	4 /* max channels across controllers */
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8

struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})

/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index	-	convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;
	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}

/**
 * get_block_ts	-	calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width calculate the DMA transaction length in data items
 * return data items or FFFF if exceeds max length for block
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}

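/*
 * Worked example (illustrative, not in the original source): a 4096-byte
 * transfer with a DMA_SLAVE_BUSWIDTH_2_BYTES source width yields
 * 4096 / 2 = 2048 data items; against a 2047-item block_size limit this
 * exceeds the maximum block length, so get_block_ts() returns 0xFFFF to
 * flag the oversize block.
 */
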
/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr -	mask the peripheral interrupt
 * @mid: dma device for which masking is required
 *
 * Masks the DMA peripheral interrupt
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
	u32 pimr;

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * dmac1_unmask_periphral_intr -	unmask the peripheral interrupt
 * @midc: dma channel for which unmasking is required
 *
 * Unmasks the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * enable_dma_interrupt -	enable the peripheral interrupt
 * @midc: dma channel for which enable interrupt is required
 *
 * Enable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_periphral_intr(midc);

	/* enable channel interrupts */
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/**
 * disable_dma_interrupt -	disable the peripheral interrupt
 * @midc: dma channel for which disable interrupt is required
 *
 * Disable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/* Check LPE PISR, make sure fwd is disabled */
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get -	get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}

/**
 * midc_desc_put -	put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}
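
/*
 * Descriptor lifecycle (descriptive note): descriptors sit on
 * midc->free_list until midc_desc_get() claims an acked one;
 * intel_mid_dma_tx_submit() then moves the descriptor to active_list
 * (or to queue if a transaction is already active), and
 * midc_descriptor_complete() returns it to free_list once the hardware
 * signals completion.
 */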
/**
 * midc_dostart -		begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel is expected to be idle at this point */
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/* error */
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}
	midc->busy = true;
	/* write registers and enable the channel */
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}

/**
 * midc_descriptor_complete -	process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
		struct intel_mid_dma_desc *desc)
		__releases(&midc->lock) __acquires(&midc->lock)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli *llitem;
	void *param_txd = NULL;

	midc->chan.completed_cookie = txd->cookie;
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	if (desc->lli != NULL) {
		/* clear the DONE bit of completed LLI in memory */
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	if (midc->raw_tfr) {
		desc->status = DMA_SUCCESS;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
			desc->lli = NULL;
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);

}
/**
 * midc_scan_descriptors -	check the descriptors in channel
 *				mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/* tx is complete */
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
	}
	return;
}
/**
 * midc_lli_fill_sg -		Helper function to convert
 *				SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/* Populate CTL_LOW and LLI values */
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
			/* Check for circular list, otherwise terminate LLI to ZERO */
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/* Populate CTL_HI values */
		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
							desc->width,
							midc->dma->block_size);
		/* Populate SAR and DAR values */
		sg_phy_addr = sg_phys(sg);
		if (desc->dirn == DMA_MEM_TO_DEV) {
			lli_bloc_desc->sar = sg_phy_addr;
			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_DEV_TO_MEM) {
			lli_bloc_desc->sar = mids->dma_slave.src_addr;
			lli_bloc_desc->dar = sg_phy_addr;
		}
		/* Copy values into block descriptor in system memory */
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_bloc_desc++;
	}
	/* Copy very first LLI values to descriptor */
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit -	callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&midc->lock);
	cookie = midc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	midc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}

/**
 * intel_mid_dma_issue_pending -	callback to issue pending txn
 * @chan: chan where pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}

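/*
 * Illustrative client-side sketch (not part of the original driver): this
 * is roughly how a slave peripheral driver would exercise the callbacks
 * above through the dmaengine framework. Names prefixed example_ are
 * hypothetical; the calls mirror the signatures defined in this file.
 */
#if 0
static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* build the descriptor (and LLI chain) for a device-to-memory txn */
	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						 DMA_DEV_TO_MEM, 0);
	if (!txd)
		return -EBUSY;
	/* intel_mid_dma_tx_submit() assigns the cookie and starts if idle */
	cookie = txd->tx_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;
	/* intel_mid_dma_issue_pending() kicks anything left on the queue */
	chan->device->device_issue_pending(chan);
	return 0;
}
#endif
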
/**
 * intel_mid_dma_tx_status -	Return status of txn
 * @chan: chan where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&midc->lock);
		midc_scan_descriptors(to_middma_device(chan->device), midc);
		spin_unlock_bh(&midc->lock);

		last_complete = chan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (txstate) {
		txstate->last = last_complete;
		txstate->used = last_used;
		txstate->residue = 0;
	}
	return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
/**
 * intel_mid_dma_device_control -	DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
			enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/* Suspend and disable the channel */
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
			desc->lli = NULL;
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}

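/*
 * Illustrative client-side sketch (not part of the original driver): the
 * DMA_SLAVE_CONFIG argument must be the dma_slave member embedded in a
 * struct intel_mid_dma_slave, because dma_slave_control() converts it
 * back with to_intel_mid_dma_slave(). Names prefixed example_ are
 * hypothetical.
 */
#if 0
static int example_config_then_abort(struct dma_chan *chan,
				     struct intel_mid_dma_slave *mids)
{
	int ret;

	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					(unsigned long)&mids->dma_slave);
	if (ret)
		return ret;
	/* ... transfers run here ... then abort everything on the channel */
	return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}
#endif
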
/**
 * intel_mid_dma_prep_memcpy -	Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destination address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
 * The peripheral txn details should be filled in slave structure properly
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/* calculate CFG_LO */
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/* calculate CFG_HI */
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/* SW HS only */
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /* default value */
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /* default value */
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/* calculate CTL_HI */
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/* calculate CTL_LO */
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	/*
	 * Here we need some translation from "enum dma_slave_buswidth"
	 * to the format for our dma controller
	 *		standard	intel_mid_dmac's format
	 *		 1 Byte			0b000
	 *		 2 Bytes		0b001
	 *		 4 Bytes		0b010
	 */
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
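	/*
	 * Worked example (illustrative): enum dma_slave_buswidth encodes
	 * 1, 2 and 4 bytes as the values 1, 2 and 4, so dividing by two
	 * yields the controller encoding in the table above:
	 * 1/2 = 0b000, 2/2 = 0b001, 4/2 = 0b010.
	 */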
pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", 703 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); 704 705 enable_dma_interrupt(midc); 706 707 desc = midc_desc_get(midc); 708 if (desc == NULL) 709 goto err_desc_get; 710 desc->sar = src; 711 desc->dar = dest ; 712 desc->len = len; 713 desc->cfg_hi = cfg_hi.cfg_hi; 714 desc->cfg_lo = cfg_lo.cfg_lo; 715 desc->ctl_lo = ctl_lo.ctl_lo; 716 desc->ctl_hi = ctl_hi.ctl_hi; 717 desc->width = width; 718 desc->dirn = mids->dma_slave.direction; 719 desc->lli_phys = 0; 720 desc->lli = NULL; 721 desc->lli_pool = NULL; 722 return &desc->txd; 723 724err_desc_get: 725 pr_err("ERR_MDMA: Failed to get desc\n"); 726 midc_desc_put(midc, desc); 727 return NULL; 728} 729/** 730 * intel_mid_dma_prep_slave_sg - Prep slave sg txn 731 * @chan: chan for DMA transfer 732 * @sgl: scatter gather list 733 * @sg_len: length of sg txn 734 * @direction: DMA transfer dirtn 735 * @flags: DMA flags 736 * 737 * Prepares LLI based periphral transfer 738 */ 739static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 740 struct dma_chan *chan, struct scatterlist *sgl, 741 unsigned int sg_len, enum dma_transfer_direction direction, 742 unsigned long flags) 743{ 744 struct intel_mid_dma_chan *midc = NULL; 745 struct intel_mid_dma_slave *mids = NULL; 746 struct intel_mid_dma_desc *desc = NULL; 747 struct dma_async_tx_descriptor *txd = NULL; 748 union intel_mid_dma_ctl_lo ctl_lo; 749 750 pr_debug("MDMA: Prep for slave SG\n"); 751 752 if (!sg_len) { 753 pr_err("MDMA: Invalid SG length\n"); 754 return NULL; 755 } 756 midc = to_intel_mid_dma_chan(chan); 757 BUG_ON(!midc); 758 759 mids = midc->mid_slave; 760 BUG_ON(!mids); 761 762 if (!midc->dma->pimr_mask) { 763 /* We can still handle sg list with only one item */ 764 if (sg_len == 1) { 765 txd = intel_mid_dma_prep_memcpy(chan, 766 mids->dma_slave.dst_addr, 767 mids->dma_slave.src_addr, 768 sgl->length, 769 flags); 770 return txd; 771 } else { 772 pr_warn("MDMA: SG list is not supported by this controller\n"); 773 return NULL; 774 } 775 } 776 777 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 778 sg_len, direction, flags); 779 780 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); 781 if (NULL == txd) { 782 pr_err("MDMA: Prep memcpy failed\n"); 783 return NULL; 784 } 785 786 desc = to_intel_mid_dma_desc(txd); 787 desc->dirn = direction; 788 ctl_lo.ctl_lo = desc->ctl_lo; 789 ctl_lo.ctlx.llp_dst_en = 1; 790 ctl_lo.ctlx.llp_src_en = 1; 791 desc->ctl_lo = ctl_lo.ctl_lo; 792 desc->lli_length = sg_len; 793 desc->current_lli = 0; 794 /* DMA coherent memory pool for LLI descriptors*/ 795 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 796 midc->dma->pdev, 797 (sizeof(struct intel_mid_dma_lli)*sg_len), 798 32, 0); 799 if (NULL == desc->lli_pool) { 800 pr_err("MID_DMA:LLI pool create failed\n"); 801 return NULL; 802 } 803 804 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 805 if (!desc->lli) { 806 pr_err("MID_DMA: LLI alloc failed\n"); 807 pci_pool_destroy(desc->lli_pool); 808 return NULL; 809 } 810 811 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 812 if (flags & DMA_PREP_INTERRUPT) { 813 iowrite32(UNMASK_INTR_REG(midc->ch_id), 814 midc->dma_base + MASK_BLOCK); 815 pr_debug("MDMA:Enabled Block interrupt\n"); 816 } 817 return &desc->txd; 818} 819 820/** 821 * intel_mid_dma_free_chan_resources - Frees dma resources 822 * @chan: chan requiring attention 823 * 824 * Frees the allocated resources on this DMA chan 825 */ 826static void 

/**
 * intel_mid_dma_free_chan_resources -	Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;

	if (true == midc->busy) {
		/* trying to free a channel that is still in use! */
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
	pm_runtime_put(&mid->pdev->dev);
}

/**
 * intel_mid_dma_alloc_chan_resources -	Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Return the descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc;
	dma_addr_t phys;
	int i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(&mid->pdev->dev)) {
			pr_err("ERR_MDMA: resume failed\n");
			return -EFAULT;
		}
	}

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/* channel is not idle */
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	chan->completed_cookie = chan->cookie = 1;

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}

/**
 * midc_handle_error -	Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet -	DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device *mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/* txn interrupt */
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/* clear these interrupts first */
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block) {
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		}
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block) {
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
					mid->dma_base + MASK_BLOCK);
		}
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/* err interrupt */
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
	return;
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in tasklet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in tasklet2...\n");
	return dma_tasklet(data);
}

pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 1056 call_tasklet = 1; 1057 } 1058 err_status &= mid->intr_mask; 1059 if (err_status) { 1060 iowrite32((err_status << INT_MASK_WE), 1061 mid->dma_base + MASK_ERR); 1062 call_tasklet = 1; 1063 } 1064 if (call_tasklet) 1065 tasklet_schedule(&mid->tasklet); 1066 1067 return IRQ_HANDLED; 1068} 1069 1070static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) 1071{ 1072 return intel_mid_dma_interrupt(irq, data); 1073} 1074 1075static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) 1076{ 1077 return intel_mid_dma_interrupt(irq, data); 1078} 1079 1080/** 1081 * mid_setup_dma - Setup the DMA controller 1082 * @pdev: Controller PCI device structure 1083 * 1084 * Initialize the DMA controller, channels, registers with DMA engine, 1085 * ISR. Initialize DMA controller channels. 1086 */ 1087static int mid_setup_dma(struct pci_dev *pdev) 1088{ 1089 struct middma_device *dma = pci_get_drvdata(pdev); 1090 int err, i; 1091 1092 /* DMA coherent memory pool for DMA descriptor allocations */ 1093 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, 1094 sizeof(struct intel_mid_dma_desc), 1095 32, 0); 1096 if (NULL == dma->dma_pool) { 1097 pr_err("ERR_MDMA:pci_pool_create failed\n"); 1098 err = -ENOMEM; 1099 goto err_dma_pool; 1100 } 1101 1102 INIT_LIST_HEAD(&dma->common.channels); 1103 dma->pci_id = pdev->device; 1104 if (dma->pimr_mask) { 1105 dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, 1106 LNW_PERIPHRAL_MASK_SIZE); 1107 if (dma->mask_reg == NULL) { 1108 pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); 1109 err = -ENOMEM; 1110 goto err_ioremap; 1111 } 1112 } else 1113 dma->mask_reg = NULL; 1114 1115 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 1116 /*init CH structures*/ 1117 dma->intr_mask = 0; 1118 dma->state = RUNNING; 1119 for (i = 0; i < dma->max_chan; i++) { 1120 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1121 1122 midch->chan.device = &dma->common; 1123 midch->chan.cookie = 1; 1124 midch->ch_id = dma->chan_base + i; 1125 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1126 1127 midch->dma_base = dma->dma_base; 1128 midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; 1129 midch->dma = dma; 1130 dma->intr_mask |= 1 << (dma->chan_base + i); 1131 spin_lock_init(&midch->lock); 1132 1133 INIT_LIST_HEAD(&midch->active_list); 1134 INIT_LIST_HEAD(&midch->queue); 1135 INIT_LIST_HEAD(&midch->free_list); 1136 /*mask interrupts*/ 1137 iowrite32(MASK_INTR_REG(midch->ch_id), 1138 dma->dma_base + MASK_BLOCK); 1139 iowrite32(MASK_INTR_REG(midch->ch_id), 1140 dma->dma_base + MASK_SRC_TRAN); 1141 iowrite32(MASK_INTR_REG(midch->ch_id), 1142 dma->dma_base + MASK_DST_TRAN); 1143 iowrite32(MASK_INTR_REG(midch->ch_id), 1144 dma->dma_base + MASK_ERR); 1145 iowrite32(MASK_INTR_REG(midch->ch_id), 1146 dma->dma_base + MASK_TFR); 1147 1148 disable_dma_interrupt(midch); 1149 list_add_tail(&midch->chan.device_node, &dma->common.channels); 1150 } 1151 pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); 1152 1153 /*init dma structure*/ 1154 dma_cap_zero(dma->common.cap_mask); 1155 dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); 1156 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1157 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1158 dma->common.dev = &pdev->dev; 1159 1160 dma->common.device_alloc_chan_resources = 1161 intel_mid_dma_alloc_chan_resources; 1162 dma->common.device_free_chan_resources = 1163 intel_mid_dma_free_chan_resources; 1164 1165 
/**
 * mid_setup_dma -	Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registers with DMA engine,
 * ISR. Initialize DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
			err = -ENOMEM;
			goto err_ioremap;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
	/* init CH structures */
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		midch->chan.cookie = 1;
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/* mask interrupts */
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/* init dma structure */
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/* enable dma cntrl */
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/* register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/* register device w/ engine */
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	if (dma->mask_reg)
		iounmap(dma->mask_reg);
err_ioremap:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;
}

/**
 * middma_shutdown -	Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister DMA controller, clear all structures and free interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
	return;
}

/**
 * intel_mid_dma_probe -	PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call mid_setup_dma to complete controller and channel initialization
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove -	PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call middma_shutdown to complete controller and channel cleanup
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Power Management */
/*
 * dma_suspend - PCI suspend function
 * @dev: device structure
 *
 * This function is called by the OS when a power event occurs
 */
static int dma_suspend(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	int i;
	struct middma_device *device = pci_get_drvdata(pci);
	pr_debug("MDMA: dma_suspend called\n");

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
	dmac1_mask_periphral_intr(device);
	device->state = SUSPENDED;
	pci_save_state(pci);
	pci_disable_device(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/*
 * dma_resume - PCI resume function
 * @dev: device structure
 *
 * This function is called by the OS when a power event occurs
 */
int dma_resume(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	int ret;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_resume called\n");
	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
		return ret;
	}
	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = SUSPENDED;
	return 0;
}

static int dma_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}

	return pm_schedule_suspend(dev, 0);
}

/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
	.suspend = dma_suspend,
	.resume = dma_resume,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		= "Intel MID DMA",
	.id_table	= intel_mid_dma_ids,
	.probe		= intel_mid_dma_probe,
	.remove		= __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
	.driver = {
		.pm = &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
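
/*
 * Illustrative client-side sketch (not part of the original file): the
 * controller advertises DMA_PRIVATE, so a client would typically obtain a
 * channel with dma_request_channel() and a filter callback. Names
 * prefixed example_ are hypothetical.
 */
#if 0
static bool example_filter(struct dma_chan *chan, void *param)
{
	/* e.g. match channels of the Medfield DMAC1 controller only */
	return to_middma_device(chan->device)->pci_id == INTEL_MFLD_DMAC1_ID;
}

static struct dma_chan *example_request_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_filter, NULL);
}
#endif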