intel_mid_dma.c revision 576e3c394a6c427c9a1378ec88ef7eb97e731992
1/* 2 * intel_mid_dma.c - Intel Langwell DMA Drivers 3 * 4 * Copyright (C) 2008-10 Intel Corp 5 * Author: Vinod Koul <vinod.koul@intel.com> 6 * The driver design is based on dw_dmac driver 7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; version 2 of the License. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License along 19 * with this program; if not, write to the Free Software Foundation, Inc., 20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 21 * 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * 24 * 25 */ 26#include <linux/pci.h> 27#include <linux/interrupt.h> 28#include <linux/pm_runtime.h> 29#include <linux/intel_mid_dma.h> 30 31#define MAX_CHAN 4 /*max ch across controllers*/ 32#include "intel_mid_dma_regs.h" 33 34#define INTEL_MID_DMAC1_ID 0x0814 35#define INTEL_MID_DMAC2_ID 0x0813 36#define INTEL_MID_GP_DMAC2_ID 0x0827 37#define INTEL_MFLD_DMAC1_ID 0x0830 38#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 39#define LNW_PERIPHRAL_MASK_SIZE 0x10 40#define LNW_PERIPHRAL_STATUS 0x0 41#define LNW_PERIPHRAL_MASK 0x8 42 43struct intel_mid_dma_probe_info { 44 u8 max_chan; 45 u8 ch_base; 46 u16 block_size; 47 u32 pimr_mask; 48}; 49 50#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ 51 ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ 52 .max_chan = (_max_chan), \ 53 .ch_base = (_ch_base), \ 54 .block_size = (_block_size), \ 55 .pimr_mask = (_pimr_mask), \ 56 }) 57 58/***************************************************************************** 59Utility Functions*/ 60/** 61 * get_ch_index - convert status to channel 62 * @status: status mask 63 * @base: dma ch base value 64 * 65 * Modify the status mask and return the channel index needing 66 * attention (or -1 if neither) 67 */ 68static int get_ch_index(int *status, unsigned int base) 69{ 70 int i; 71 for (i = 0; i < MAX_CHAN; i++) { 72 if (*status & (1 << (i + base))) { 73 *status = *status & ~(1 << (i + base)); 74 pr_debug("MDMA: index %d New status %x\n", i, *status); 75 return i; 76 } 77 } 78 return -1; 79} 80 81/** 82 * get_block_ts - calculates dma transaction length 83 * @len: dma transfer length 84 * @tx_width: dma transfer src width 85 * @block_size: dma controller max block size 86 * 87 * Based on src width calculate the DMA trsaction length in data items 88 * return data items or FFFF if exceeds max length for block 89 */ 90static int get_block_ts(int len, int tx_width, int block_size) 91{ 92 int byte_width = 0, block_ts = 0; 93 94 switch (tx_width) { 95 case LNW_DMA_WIDTH_8BIT: 96 byte_width = 1; 97 break; 98 case LNW_DMA_WIDTH_16BIT: 99 byte_width = 2; 100 break; 101 case LNW_DMA_WIDTH_32BIT: 102 default: 103 byte_width = 4; 104 break; 105 } 106 107 block_ts = len/byte_width; 108 if (block_ts > block_size) 109 block_ts = 0xFFFF; 110 return block_ts; 111} 112 113/***************************************************************************** 114DMAC1 interrupt Functions*/ 115 116/** 117 * dmac1_mask_periphral_intr - mask the periphral interrupt 118 * @midc: dma channel for which masking is required 119 * 120 * Masks the DMA periphral interrupt 121 * this is valid for DMAC1 family controllers only 122 * This controller should have periphral mask registers already mapped 123 */ 124static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc) 125{ 126 u32 pimr; 127 struct middma_device *mid = to_middma_device(midc->chan.device); 128 129 if (mid->pimr_mask) { 130 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 131 pimr |= mid->pimr_mask; 132 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 133 } 134 return; 135} 136 137/** 138 * dmac1_unmask_periphral_intr - unmask the periphral interrupt 139 * @midc: dma channel for which masking is required 140 * 141 * UnMasks the DMA periphral interrupt, 142 * this is valid for DMAC1 family controllers only 143 * This controller should have periphral mask registers already mapped 144 */ 145static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) 146{ 147 u32 pimr; 148 struct middma_device *mid = to_middma_device(midc->chan.device); 149 150 if (mid->pimr_mask) { 151 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 152 pimr &= ~mid->pimr_mask; 153 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 154 } 155 return; 156} 157 158/** 159 * enable_dma_interrupt - enable the periphral interrupt 160 * @midc: dma channel for which enable interrupt is required 161 * 162 * Enable the DMA periphral interrupt, 163 * this is valid for DMAC1 family controllers only 164 * This controller should have periphral mask registers already mapped 165 */ 166static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) 167{ 168 dmac1_unmask_periphral_intr(midc); 169 170 /*en ch interrupts*/ 171 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 172 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 173 return; 174} 175 176/** 177 * disable_dma_interrupt - disable the periphral interrupt 178 * @midc: dma channel for which disable interrupt is required 179 * 180 * Disable the DMA periphral interrupt, 181 * this is valid for DMAC1 family controllers only 182 * This controller should have periphral mask registers already mapped 183 */ 184static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 185{ 186 /*Check LPE PISR, make sure fwd is disabled*/ 187 dmac1_mask_periphral_intr(midc); 188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 190 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 191 return; 192} 193 194/***************************************************************************** 195DMA channel helper Functions*/ 196/** 197 * mid_desc_get - get a descriptor 198 * @midc: dma channel for which descriptor is required 199 * 200 * Obtain a descriptor for the channel. Returns NULL if none are free. 201 * Once the descriptor is returned it is private until put on another 202 * list or freed 203 */ 204static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) 205{ 206 struct intel_mid_dma_desc *desc, *_desc; 207 struct intel_mid_dma_desc *ret = NULL; 208 209 spin_lock_bh(&midc->lock); 210 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 211 if (async_tx_test_ack(&desc->txd)) { 212 list_del(&desc->desc_node); 213 ret = desc; 214 break; 215 } 216 } 217 spin_unlock_bh(&midc->lock); 218 return ret; 219} 220 221/** 222 * mid_desc_put - put a descriptor 223 * @midc: dma channel for which descriptor is required 224 * @desc: descriptor to put 225 * 226 * Return a descriptor from lwn_desc_get back to the free pool 227 */ 228static void midc_desc_put(struct intel_mid_dma_chan *midc, 229 struct intel_mid_dma_desc *desc) 230{ 231 if (desc) { 232 spin_lock_bh(&midc->lock); 233 list_add_tail(&desc->desc_node, &midc->free_list); 234 spin_unlock_bh(&midc->lock); 235 } 236} 237/** 238 * midc_dostart - begin a DMA transaction 239 * @midc: channel for which txn is to be started 240 * @first: first descriptor of series 241 * 242 * Load a transaction into the engine. This must be called with midc->lock 243 * held and bh disabled. 244 */ 245static void midc_dostart(struct intel_mid_dma_chan *midc, 246 struct intel_mid_dma_desc *first) 247{ 248 struct middma_device *mid = to_middma_device(midc->chan.device); 249 250 /* channel is idle */ 251 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { 252 /*error*/ 253 pr_err("ERR_MDMA: channel is busy in start\n"); 254 /* The tasklet will hopefully advance the queue... */ 255 return; 256 } 257 midc->busy = true; 258 /*write registers and en*/ 259 iowrite32(first->sar, midc->ch_regs + SAR); 260 iowrite32(first->dar, midc->ch_regs + DAR); 261 iowrite32(first->lli_phys, midc->ch_regs + LLP); 262 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 263 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 264 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); 265 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); 266 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 267 (int)first->sar, (int)first->dar, first->cfg_hi, 268 first->cfg_lo, first->ctl_hi, first->ctl_lo); 269 first->status = DMA_IN_PROGRESS; 270 271 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 272} 273 274/** 275 * midc_descriptor_complete - process completed descriptor 276 * @midc: channel owning the descriptor 277 * @desc: the descriptor itself 278 * 279 * Process a completed descriptor and perform any callbacks upon 280 * the completion. The completion handling drops the lock during the 281 * callbacks but must be called with the lock held. 282 */ 283static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, 284 struct intel_mid_dma_desc *desc) 285{ 286 struct dma_async_tx_descriptor *txd = &desc->txd; 287 dma_async_tx_callback callback_txd = NULL; 288 struct intel_mid_dma_lli *llitem; 289 void *param_txd = NULL; 290 291 midc->completed = txd->cookie; 292 callback_txd = txd->callback; 293 param_txd = txd->callback_param; 294 295 if (desc->lli != NULL) { 296 /*clear the DONE bit of completed LLI in memory*/ 297 llitem = desc->lli + desc->current_lli; 298 llitem->ctl_hi &= CLEAR_DONE; 299 if (desc->current_lli < desc->lli_length-1) 300 (desc->current_lli)++; 301 else 302 desc->current_lli = 0; 303 } 304 spin_unlock_bh(&midc->lock); 305 if (callback_txd) { 306 pr_debug("MDMA: TXD callback set ... calling\n"); 307 callback_txd(param_txd); 308 } 309 if (midc->raw_tfr) { 310 desc->status = DMA_SUCCESS; 311 if (desc->lli != NULL) { 312 pci_pool_free(desc->lli_pool, desc->lli, 313 desc->lli_phys); 314 pci_pool_destroy(desc->lli_pool); 315 } 316 list_move(&desc->desc_node, &midc->free_list); 317 midc->busy = false; 318 } 319 spin_lock_bh(&midc->lock); 320 321} 322/** 323 * midc_scan_descriptors - check the descriptors in channel 324 * mark completed when tx is completete 325 * @mid: device 326 * @midc: channel to scan 327 * 328 * Walk the descriptor chain for the device and process any entries 329 * that are complete. 330 */ 331static void midc_scan_descriptors(struct middma_device *mid, 332 struct intel_mid_dma_chan *midc) 333{ 334 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; 335 336 /*tx is complete*/ 337 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 338 if (desc->status == DMA_IN_PROGRESS) 339 midc_descriptor_complete(midc, desc); 340 } 341 return; 342 } 343/** 344 * midc_lli_fill_sg - Helper function to convert 345 * SG list to Linked List Items. 346 *@midc: Channel 347 *@desc: DMA descriptor 348 *@sglist: Pointer to SG list 349 *@sglen: SG list length 350 *@flags: DMA transaction flags 351 * 352 * Walk through the SG list and convert the SG list into Linked 353 * List Items (LLI). 354 */ 355static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, 356 struct intel_mid_dma_desc *desc, 357 struct scatterlist *sglist, 358 unsigned int sglen, 359 unsigned int flags) 360{ 361 struct intel_mid_dma_slave *mids; 362 struct scatterlist *sg; 363 dma_addr_t lli_next, sg_phy_addr; 364 struct intel_mid_dma_lli *lli_bloc_desc; 365 union intel_mid_dma_ctl_lo ctl_lo; 366 union intel_mid_dma_ctl_hi ctl_hi; 367 int i; 368 369 pr_debug("MDMA: Entered midc_lli_fill_sg\n"); 370 mids = midc->chan.private; 371 372 lli_bloc_desc = desc->lli; 373 lli_next = desc->lli_phys; 374 375 ctl_lo.ctl_lo = desc->ctl_lo; 376 ctl_hi.ctl_hi = desc->ctl_hi; 377 for_each_sg(sglist, sg, sglen, i) { 378 /*Populate CTL_LOW and LLI values*/ 379 if (i != sglen - 1) { 380 lli_next = lli_next + 381 sizeof(struct intel_mid_dma_lli); 382 } else { 383 /*Check for circular list, otherwise terminate LLI to ZERO*/ 384 if (flags & DMA_PREP_CIRCULAR_LIST) { 385 pr_debug("MDMA: LLI is configured in circular mode\n"); 386 lli_next = desc->lli_phys; 387 } else { 388 lli_next = 0; 389 ctl_lo.ctlx.llp_dst_en = 0; 390 ctl_lo.ctlx.llp_src_en = 0; 391 } 392 } 393 /*Populate CTL_HI values*/ 394 ctl_hi.ctlx.block_ts = get_block_ts(sg->length, 395 desc->width, 396 midc->dma->block_size); 397 /*Populate SAR and DAR values*/ 398 sg_phy_addr = sg_phys(sg); 399 if (desc->dirn == DMA_TO_DEVICE) { 400 lli_bloc_desc->sar = sg_phy_addr; 401 lli_bloc_desc->dar = mids->per_addr; 402 } else if (desc->dirn == DMA_FROM_DEVICE) { 403 lli_bloc_desc->sar = mids->per_addr; 404 lli_bloc_desc->dar = sg_phy_addr; 405 } 406 /*Copy values into block descriptor in system memroy*/ 407 lli_bloc_desc->llp = lli_next; 408 lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; 409 lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; 410 411 lli_bloc_desc++; 412 } 413 /*Copy very first LLI values to descriptor*/ 414 desc->ctl_lo = desc->lli->ctl_lo; 415 desc->ctl_hi = desc->lli->ctl_hi; 416 desc->sar = desc->lli->sar; 417 desc->dar = desc->lli->dar; 418 419 return 0; 420} 421/***************************************************************************** 422DMA engine callback Functions*/ 423/** 424 * intel_mid_dma_tx_submit - callback to submit DMA transaction 425 * @tx: dma engine descriptor 426 * 427 * Submit the DMA trasaction for this descriptor, start if ch idle 428 */ 429static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) 430{ 431 struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); 432 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); 433 dma_cookie_t cookie; 434 435 spin_lock_bh(&midc->lock); 436 cookie = midc->chan.cookie; 437 438 if (++cookie < 0) 439 cookie = 1; 440 441 midc->chan.cookie = cookie; 442 desc->txd.cookie = cookie; 443 444 445 if (list_empty(&midc->active_list)) 446 list_add_tail(&desc->desc_node, &midc->active_list); 447 else 448 list_add_tail(&desc->desc_node, &midc->queue); 449 450 midc_dostart(midc, desc); 451 spin_unlock_bh(&midc->lock); 452 453 return cookie; 454} 455 456/** 457 * intel_mid_dma_issue_pending - callback to issue pending txn 458 * @chan: chan where pending trascation needs to be checked and submitted 459 * 460 * Call for scan to issue pending descriptors 461 */ 462static void intel_mid_dma_issue_pending(struct dma_chan *chan) 463{ 464 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 465 466 spin_lock_bh(&midc->lock); 467 if (!list_empty(&midc->queue)) 468 midc_scan_descriptors(to_middma_device(chan->device), midc); 469 spin_unlock_bh(&midc->lock); 470} 471 472/** 473 * intel_mid_dma_tx_status - Return status of txn 474 * @chan: chan for where status needs to be checked 475 * @cookie: cookie for txn 476 * @txstate: DMA txn state 477 * 478 * Return status of DMA txn 479 */ 480static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, 481 dma_cookie_t cookie, 482 struct dma_tx_state *txstate) 483{ 484 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 485 dma_cookie_t last_used; 486 dma_cookie_t last_complete; 487 int ret; 488 489 last_complete = midc->completed; 490 last_used = chan->cookie; 491 492 ret = dma_async_is_complete(cookie, last_complete, last_used); 493 if (ret != DMA_SUCCESS) { 494 midc_scan_descriptors(to_middma_device(chan->device), midc); 495 496 last_complete = midc->completed; 497 last_used = chan->cookie; 498 499 ret = dma_async_is_complete(cookie, last_complete, last_used); 500 } 501 502 if (txstate) { 503 txstate->last = last_complete; 504 txstate->used = last_used; 505 txstate->residue = 0; 506 } 507 return ret; 508} 509 510/** 511 * intel_mid_dma_device_control - DMA device control 512 * @chan: chan for DMA control 513 * @cmd: control cmd 514 * @arg: cmd arg value 515 * 516 * Perform DMA control command 517 */ 518static int intel_mid_dma_device_control(struct dma_chan *chan, 519 enum dma_ctrl_cmd cmd, unsigned long arg) 520{ 521 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 522 struct middma_device *mid = to_middma_device(chan->device); 523 struct intel_mid_dma_desc *desc, *_desc; 524 union intel_mid_dma_cfg_lo cfg_lo; 525 526 if (cmd != DMA_TERMINATE_ALL) 527 return -ENXIO; 528 529 spin_lock_bh(&midc->lock); 530 if (midc->busy == false) { 531 spin_unlock_bh(&midc->lock); 532 return 0; 533 } 534 /*Suspend and disable the channel*/ 535 cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); 536 cfg_lo.cfgx.ch_susp = 1; 537 iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); 538 iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 539 midc->busy = false; 540 /* Disable interrupts */ 541 disable_dma_interrupt(midc); 542 midc->descs_allocated = 0; 543 midc->slave = NULL; 544 545 spin_unlock_bh(&midc->lock); 546 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 547 if (desc->lli != NULL) { 548 pci_pool_free(desc->lli_pool, desc->lli, 549 desc->lli_phys); 550 pci_pool_destroy(desc->lli_pool); 551 } 552 list_move(&desc->desc_node, &midc->free_list); 553 } 554 return 0; 555} 556 557 558/** 559 * intel_mid_dma_prep_memcpy - Prep memcpy txn 560 * @chan: chan for DMA transfer 561 * @dest: destn address 562 * @src: src address 563 * @len: DMA transfer len 564 * @flags: DMA flags 565 * 566 * Perform a DMA memcpy. Note we support slave periphral DMA transfers only 567 * The periphral txn details should be filled in slave structure properly 568 * Returns the descriptor for this txn 569 */ 570static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( 571 struct dma_chan *chan, dma_addr_t dest, 572 dma_addr_t src, size_t len, unsigned long flags) 573{ 574 struct intel_mid_dma_chan *midc; 575 struct intel_mid_dma_desc *desc = NULL; 576 struct intel_mid_dma_slave *mids; 577 union intel_mid_dma_ctl_lo ctl_lo; 578 union intel_mid_dma_ctl_hi ctl_hi; 579 union intel_mid_dma_cfg_lo cfg_lo; 580 union intel_mid_dma_cfg_hi cfg_hi; 581 enum intel_mid_dma_width width = 0; 582 583 pr_debug("MDMA: Prep for memcpy\n"); 584 WARN_ON(!chan); 585 if (!len) 586 return NULL; 587 588 mids = chan->private; 589 WARN_ON(!mids); 590 591 midc = to_intel_mid_dma_chan(chan); 592 WARN_ON(!midc); 593 594 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 595 midc->dma->pci_id, midc->ch_id, len); 596 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 597 mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); 598 599 /*calculate CFG_LO*/ 600 if (mids->hs_mode == LNW_DMA_SW_HS) { 601 cfg_lo.cfg_lo = 0; 602 cfg_lo.cfgx.hs_sel_dst = 1; 603 cfg_lo.cfgx.hs_sel_src = 1; 604 } else if (mids->hs_mode == LNW_DMA_HW_HS) 605 cfg_lo.cfg_lo = 0x00000; 606 607 /*calculate CFG_HI*/ 608 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 609 /*SW HS only*/ 610 cfg_hi.cfg_hi = 0; 611 } else { 612 cfg_hi.cfg_hi = 0; 613 if (midc->dma->pimr_mask) { 614 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 615 cfg_hi.cfgx.fifo_mode = 1; 616 if (mids->dirn == DMA_TO_DEVICE) { 617 cfg_hi.cfgx.src_per = 0; 618 if (mids->device_instance == 0) 619 cfg_hi.cfgx.dst_per = 3; 620 if (mids->device_instance == 1) 621 cfg_hi.cfgx.dst_per = 1; 622 } else if (mids->dirn == DMA_FROM_DEVICE) { 623 if (mids->device_instance == 0) 624 cfg_hi.cfgx.src_per = 2; 625 if (mids->device_instance == 1) 626 cfg_hi.cfgx.src_per = 0; 627 cfg_hi.cfgx.dst_per = 0; 628 } 629 } else { 630 cfg_hi.cfgx.protctl = 0x1; /*default value*/ 631 cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = 632 midc->ch_id - midc->dma->chan_base; 633 } 634 } 635 636 /*calculate CTL_HI*/ 637 ctl_hi.ctlx.reser = 0; 638 ctl_hi.ctlx.done = 0; 639 width = mids->src_width; 640 641 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 642 pr_debug("MDMA:calc len %d for block size %d\n", 643 ctl_hi.ctlx.block_ts, midc->dma->block_size); 644 /*calculate CTL_LO*/ 645 ctl_lo.ctl_lo = 0; 646 ctl_lo.ctlx.int_en = 1; 647 ctl_lo.ctlx.dst_tr_width = mids->dst_width; 648 ctl_lo.ctlx.src_tr_width = mids->src_width; 649 ctl_lo.ctlx.dst_msize = mids->src_msize; 650 ctl_lo.ctlx.src_msize = mids->dst_msize; 651 652 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 653 ctl_lo.ctlx.tt_fc = 0; 654 ctl_lo.ctlx.sinc = 0; 655 ctl_lo.ctlx.dinc = 0; 656 } else { 657 if (mids->dirn == DMA_TO_DEVICE) { 658 ctl_lo.ctlx.sinc = 0; 659 ctl_lo.ctlx.dinc = 2; 660 ctl_lo.ctlx.tt_fc = 1; 661 } else if (mids->dirn == DMA_FROM_DEVICE) { 662 ctl_lo.ctlx.sinc = 2; 663 ctl_lo.ctlx.dinc = 0; 664 ctl_lo.ctlx.tt_fc = 2; 665 } 666 } 667 668 pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", 669 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); 670 671 enable_dma_interrupt(midc); 672 673 desc = midc_desc_get(midc); 674 if (desc == NULL) 675 goto err_desc_get; 676 desc->sar = src; 677 desc->dar = dest ; 678 desc->len = len; 679 desc->cfg_hi = cfg_hi.cfg_hi; 680 desc->cfg_lo = cfg_lo.cfg_lo; 681 desc->ctl_lo = ctl_lo.ctl_lo; 682 desc->ctl_hi = ctl_hi.ctl_hi; 683 desc->width = width; 684 desc->dirn = mids->dirn; 685 desc->lli_phys = 0; 686 desc->lli = NULL; 687 desc->lli_pool = NULL; 688 return &desc->txd; 689 690err_desc_get: 691 pr_err("ERR_MDMA: Failed to get desc\n"); 692 midc_desc_put(midc, desc); 693 return NULL; 694} 695/** 696 * intel_mid_dma_prep_slave_sg - Prep slave sg txn 697 * @chan: chan for DMA transfer 698 * @sgl: scatter gather list 699 * @sg_len: length of sg txn 700 * @direction: DMA transfer dirtn 701 * @flags: DMA flags 702 * 703 * Prepares LLI based periphral transfer 704 */ 705static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 706 struct dma_chan *chan, struct scatterlist *sgl, 707 unsigned int sg_len, enum dma_data_direction direction, 708 unsigned long flags) 709{ 710 struct intel_mid_dma_chan *midc = NULL; 711 struct intel_mid_dma_slave *mids = NULL; 712 struct intel_mid_dma_desc *desc = NULL; 713 struct dma_async_tx_descriptor *txd = NULL; 714 union intel_mid_dma_ctl_lo ctl_lo; 715 716 pr_debug("MDMA: Prep for slave SG\n"); 717 718 if (!sg_len) { 719 pr_err("MDMA: Invalid SG length\n"); 720 return NULL; 721 } 722 midc = to_intel_mid_dma_chan(chan); 723 BUG_ON(!midc); 724 725 mids = chan->private; 726 BUG_ON(!mids); 727 728 if (!midc->dma->pimr_mask) { 729 pr_debug("MDMA: SG list is not supported by this controller\n"); 730 return NULL; 731 } 732 733 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 734 sg_len, direction, flags); 735 736 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); 737 if (NULL == txd) { 738 pr_err("MDMA: Prep memcpy failed\n"); 739 return NULL; 740 } 741 desc = to_intel_mid_dma_desc(txd); 742 desc->dirn = direction; 743 ctl_lo.ctl_lo = desc->ctl_lo; 744 ctl_lo.ctlx.llp_dst_en = 1; 745 ctl_lo.ctlx.llp_src_en = 1; 746 desc->ctl_lo = ctl_lo.ctl_lo; 747 desc->lli_length = sg_len; 748 desc->current_lli = 0; 749 /* DMA coherent memory pool for LLI descriptors*/ 750 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 751 midc->dma->pdev, 752 (sizeof(struct intel_mid_dma_lli)*sg_len), 753 32, 0); 754 if (NULL == desc->lli_pool) { 755 pr_err("MID_DMA:LLI pool create failed\n"); 756 return NULL; 757 } 758 759 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 760 if (!desc->lli) { 761 pr_err("MID_DMA: LLI alloc failed\n"); 762 pci_pool_destroy(desc->lli_pool); 763 return NULL; 764 } 765 766 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 767 if (flags & DMA_PREP_INTERRUPT) { 768 iowrite32(UNMASK_INTR_REG(midc->ch_id), 769 midc->dma_base + MASK_BLOCK); 770 pr_debug("MDMA:Enabled Block interrupt\n"); 771 } 772 return &desc->txd; 773} 774 775/** 776 * intel_mid_dma_free_chan_resources - Frees dma resources 777 * @chan: chan requiring attention 778 * 779 * Frees the allocated resources on this DMA chan 780 */ 781static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) 782{ 783 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 784 struct middma_device *mid = to_middma_device(chan->device); 785 struct intel_mid_dma_desc *desc, *_desc; 786 787 if (true == midc->busy) { 788 /*trying to free ch in use!!!!!*/ 789 pr_err("ERR_MDMA: trying to free ch in use\n"); 790 } 791 pm_runtime_put(&mid->pdev->dev); 792 spin_lock_bh(&midc->lock); 793 midc->descs_allocated = 0; 794 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 795 list_del(&desc->desc_node); 796 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 797 } 798 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 799 list_del(&desc->desc_node); 800 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 801 } 802 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { 803 list_del(&desc->desc_node); 804 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 805 } 806 spin_unlock_bh(&midc->lock); 807 midc->in_use = false; 808 midc->busy = false; 809 /* Disable CH interrupts */ 810 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 811 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); 812} 813 814/** 815 * intel_mid_dma_alloc_chan_resources - Allocate dma resources 816 * @chan: chan requiring attention 817 * 818 * Allocates DMA resources on this chan 819 * Return the descriptors allocated 820 */ 821static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) 822{ 823 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 824 struct middma_device *mid = to_middma_device(chan->device); 825 struct intel_mid_dma_desc *desc; 826 dma_addr_t phys; 827 int i = 0; 828 829 pm_runtime_get_sync(&mid->pdev->dev); 830 831 if (mid->state == SUSPENDED) { 832 if (dma_resume(mid->pdev)) { 833 pr_err("ERR_MDMA: resume failed"); 834 return -EFAULT; 835 } 836 } 837 838 /* ASSERT: channel is idle */ 839 if (test_ch_en(mid->dma_base, midc->ch_id)) { 840 /*ch is not idle*/ 841 pr_err("ERR_MDMA: ch not idle\n"); 842 pm_runtime_put(&mid->pdev->dev); 843 return -EIO; 844 } 845 midc->completed = chan->cookie = 1; 846 847 spin_lock_bh(&midc->lock); 848 while (midc->descs_allocated < DESCS_PER_CHANNEL) { 849 spin_unlock_bh(&midc->lock); 850 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 851 if (!desc) { 852 pr_err("ERR_MDMA: desc failed\n"); 853 pm_runtime_put(&mid->pdev->dev); 854 return -ENOMEM; 855 /*check*/ 856 } 857 dma_async_tx_descriptor_init(&desc->txd, chan); 858 desc->txd.tx_submit = intel_mid_dma_tx_submit; 859 desc->txd.flags = DMA_CTRL_ACK; 860 desc->txd.phys = phys; 861 spin_lock_bh(&midc->lock); 862 i = ++midc->descs_allocated; 863 list_add_tail(&desc->desc_node, &midc->free_list); 864 } 865 spin_unlock_bh(&midc->lock); 866 midc->in_use = true; 867 midc->busy = false; 868 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 869 return i; 870} 871 872/** 873 * midc_handle_error - Handle DMA txn error 874 * @mid: controller where error occured 875 * @midc: chan where error occured 876 * 877 * Scan the descriptor for error 878 */ 879static void midc_handle_error(struct middma_device *mid, 880 struct intel_mid_dma_chan *midc) 881{ 882 midc_scan_descriptors(mid, midc); 883} 884 885/** 886 * dma_tasklet - DMA interrupt tasklet 887 * @data: tasklet arg (the controller structure) 888 * 889 * Scan the controller for interrupts for completion/error 890 * Clear the interrupt and call for handling completion/error 891 */ 892static void dma_tasklet(unsigned long data) 893{ 894 struct middma_device *mid = NULL; 895 struct intel_mid_dma_chan *midc = NULL; 896 u32 status, raw_tfr, raw_block; 897 int i; 898 899 mid = (struct middma_device *)data; 900 if (mid == NULL) { 901 pr_err("ERR_MDMA: tasklet Null param\n"); 902 return; 903 } 904 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 905 raw_tfr = ioread32(mid->dma_base + RAW_TFR); 906 raw_block = ioread32(mid->dma_base + RAW_BLOCK); 907 status = raw_tfr | raw_block; 908 status &= mid->intr_mask; 909 while (status) { 910 /*txn interrupt*/ 911 i = get_ch_index(&status, mid->chan_base); 912 if (i < 0) { 913 pr_err("ERR_MDMA:Invalid ch index %x\n", i); 914 return; 915 } 916 midc = &mid->ch[i]; 917 if (midc == NULL) { 918 pr_err("ERR_MDMA:Null param midc\n"); 919 return; 920 } 921 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 922 status, midc->ch_id, i); 923 midc->raw_tfr = raw_tfr; 924 midc->raw_block = raw_block; 925 spin_lock_bh(&midc->lock); 926 /*clearing this interrupts first*/ 927 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 928 if (raw_block) { 929 iowrite32((1 << midc->ch_id), 930 mid->dma_base + CLEAR_BLOCK); 931 } 932 midc_scan_descriptors(mid, midc); 933 pr_debug("MDMA:Scan of desc... complete, unmasking\n"); 934 iowrite32(UNMASK_INTR_REG(midc->ch_id), 935 mid->dma_base + MASK_TFR); 936 if (raw_block) { 937 iowrite32(UNMASK_INTR_REG(midc->ch_id), 938 mid->dma_base + MASK_BLOCK); 939 } 940 spin_unlock_bh(&midc->lock); 941 } 942 943 status = ioread32(mid->dma_base + RAW_ERR); 944 status &= mid->intr_mask; 945 while (status) { 946 /*err interrupt*/ 947 i = get_ch_index(&status, mid->chan_base); 948 if (i < 0) { 949 pr_err("ERR_MDMA:Invalid ch index %x\n", i); 950 return; 951 } 952 midc = &mid->ch[i]; 953 if (midc == NULL) { 954 pr_err("ERR_MDMA:Null param midc\n"); 955 return; 956 } 957 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 958 status, midc->ch_id, i); 959 960 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); 961 spin_lock_bh(&midc->lock); 962 midc_handle_error(mid, midc); 963 iowrite32(UNMASK_INTR_REG(midc->ch_id), 964 mid->dma_base + MASK_ERR); 965 spin_unlock_bh(&midc->lock); 966 } 967 pr_debug("MDMA:Exiting takslet...\n"); 968 return; 969} 970 971static void dma_tasklet1(unsigned long data) 972{ 973 pr_debug("MDMA:in takslet1...\n"); 974 return dma_tasklet(data); 975} 976 977static void dma_tasklet2(unsigned long data) 978{ 979 pr_debug("MDMA:in takslet2...\n"); 980 return dma_tasklet(data); 981} 982 983/** 984 * intel_mid_dma_interrupt - DMA ISR 985 * @irq: IRQ where interrupt occurred 986 * @data: ISR cllback data (the controller structure) 987 * 988 * See if this is our interrupt if so then schedule the tasklet 989 * otherwise ignore 990 */ 991static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 992{ 993 struct middma_device *mid = data; 994 u32 tfr_status, err_status; 995 int call_tasklet = 0; 996 997 tfr_status = ioread32(mid->dma_base + RAW_TFR); 998 err_status = ioread32(mid->dma_base + RAW_ERR); 999 if (!tfr_status && !err_status) 1000 return IRQ_NONE; 1001 1002 /*DMA Interrupt*/ 1003 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 1004 if (!mid) { 1005 pr_err("ERR_MDMA:null pointer mid\n"); 1006 return -EINVAL; 1007 } 1008 1009 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 1010 tfr_status &= mid->intr_mask; 1011 if (tfr_status) { 1012 /*need to disable intr*/ 1013 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); 1014 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); 1015 pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 1016 call_tasklet = 1; 1017 } 1018 err_status &= mid->intr_mask; 1019 if (err_status) { 1020 iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); 1021 call_tasklet = 1; 1022 } 1023 if (call_tasklet) 1024 tasklet_schedule(&mid->tasklet); 1025 1026 return IRQ_HANDLED; 1027} 1028 1029static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) 1030{ 1031 return intel_mid_dma_interrupt(irq, data); 1032} 1033 1034static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) 1035{ 1036 return intel_mid_dma_interrupt(irq, data); 1037} 1038 1039/** 1040 * mid_setup_dma - Setup the DMA controller 1041 * @pdev: Controller PCI device structure 1042 * 1043 * Initilize the DMA controller, channels, registers with DMA engine, 1044 * ISR. Initilize DMA controller channels. 1045 */ 1046static int mid_setup_dma(struct pci_dev *pdev) 1047{ 1048 struct middma_device *dma = pci_get_drvdata(pdev); 1049 int err, i; 1050 1051 /* DMA coherent memory pool for DMA descriptor allocations */ 1052 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, 1053 sizeof(struct intel_mid_dma_desc), 1054 32, 0); 1055 if (NULL == dma->dma_pool) { 1056 pr_err("ERR_MDMA:pci_pool_create failed\n"); 1057 err = -ENOMEM; 1058 kfree(dma); 1059 goto err_dma_pool; 1060 } 1061 1062 INIT_LIST_HEAD(&dma->common.channels); 1063 dma->pci_id = pdev->device; 1064 if (dma->pimr_mask) { 1065 dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, 1066 LNW_PERIPHRAL_MASK_SIZE); 1067 if (dma->mask_reg == NULL) { 1068 pr_err("ERR_MDMA:Cant map periphral intr space !!\n"); 1069 return -ENOMEM; 1070 } 1071 } else 1072 dma->mask_reg = NULL; 1073 1074 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 1075 /*init CH structures*/ 1076 dma->intr_mask = 0; 1077 dma->state = RUNNING; 1078 for (i = 0; i < dma->max_chan; i++) { 1079 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1080 1081 midch->chan.device = &dma->common; 1082 midch->chan.cookie = 1; 1083 midch->chan.chan_id = i; 1084 midch->ch_id = dma->chan_base + i; 1085 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1086 1087 midch->dma_base = dma->dma_base; 1088 midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; 1089 midch->dma = dma; 1090 dma->intr_mask |= 1 << (dma->chan_base + i); 1091 spin_lock_init(&midch->lock); 1092 1093 INIT_LIST_HEAD(&midch->active_list); 1094 INIT_LIST_HEAD(&midch->queue); 1095 INIT_LIST_HEAD(&midch->free_list); 1096 /*mask interrupts*/ 1097 iowrite32(MASK_INTR_REG(midch->ch_id), 1098 dma->dma_base + MASK_BLOCK); 1099 iowrite32(MASK_INTR_REG(midch->ch_id), 1100 dma->dma_base + MASK_SRC_TRAN); 1101 iowrite32(MASK_INTR_REG(midch->ch_id), 1102 dma->dma_base + MASK_DST_TRAN); 1103 iowrite32(MASK_INTR_REG(midch->ch_id), 1104 dma->dma_base + MASK_ERR); 1105 iowrite32(MASK_INTR_REG(midch->ch_id), 1106 dma->dma_base + MASK_TFR); 1107 1108 disable_dma_interrupt(midch); 1109 list_add_tail(&midch->chan.device_node, &dma->common.channels); 1110 } 1111 pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); 1112 1113 /*init dma structure*/ 1114 dma_cap_zero(dma->common.cap_mask); 1115 dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); 1116 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1117 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1118 dma->common.dev = &pdev->dev; 1119 dma->common.chancnt = dma->max_chan; 1120 1121 dma->common.device_alloc_chan_resources = 1122 intel_mid_dma_alloc_chan_resources; 1123 dma->common.device_free_chan_resources = 1124 intel_mid_dma_free_chan_resources; 1125 1126 dma->common.device_tx_status = intel_mid_dma_tx_status; 1127 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; 1128 dma->common.device_issue_pending = intel_mid_dma_issue_pending; 1129 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; 1130 dma->common.device_control = intel_mid_dma_device_control; 1131 1132 /*enable dma cntrl*/ 1133 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); 1134 1135 /*register irq */ 1136 if (dma->pimr_mask) { 1137 pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 1138 err = request_irq(pdev->irq, intel_mid_dma_interrupt1, 1139 IRQF_SHARED, "INTEL_MID_DMAC1", dma); 1140 if (0 != err) 1141 goto err_irq; 1142 } else { 1143 dma->intr_mask = 0x03; 1144 pr_debug("MDMA:Requesting irq for DMAC2\n"); 1145 err = request_irq(pdev->irq, intel_mid_dma_interrupt2, 1146 IRQF_SHARED, "INTEL_MID_DMAC2", dma); 1147 if (0 != err) 1148 goto err_irq; 1149 } 1150 /*register device w/ engine*/ 1151 err = dma_async_device_register(&dma->common); 1152 if (0 != err) { 1153 pr_err("ERR_MDMA:device_register failed: %d\n", err); 1154 goto err_engine; 1155 } 1156 if (dma->pimr_mask) { 1157 pr_debug("setting up tasklet1 for DMAC1\n"); 1158 tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); 1159 } else { 1160 pr_debug("setting up tasklet2 for DMAC2\n"); 1161 tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); 1162 } 1163 return 0; 1164 1165err_engine: 1166 free_irq(pdev->irq, dma); 1167err_irq: 1168 pci_pool_destroy(dma->dma_pool); 1169 kfree(dma); 1170err_dma_pool: 1171 pr_err("ERR_MDMA:setup_dma failed: %d\n", err); 1172 return err; 1173 1174} 1175 1176/** 1177 * middma_shutdown - Shutdown the DMA controller 1178 * @pdev: Controller PCI device structure 1179 * 1180 * Called by remove 1181 * Unregister DMa controller, clear all structures and free interrupt 1182 */ 1183static void middma_shutdown(struct pci_dev *pdev) 1184{ 1185 struct middma_device *device = pci_get_drvdata(pdev); 1186 1187 dma_async_device_unregister(&device->common); 1188 pci_pool_destroy(device->dma_pool); 1189 if (device->mask_reg) 1190 iounmap(device->mask_reg); 1191 if (device->dma_base) 1192 iounmap(device->dma_base); 1193 free_irq(pdev->irq, device); 1194 return; 1195} 1196 1197/** 1198 * intel_mid_dma_probe - PCI Probe 1199 * @pdev: Controller PCI device structure 1200 * @id: pci device id structure 1201 * 1202 * Initilize the PCI device, map BARs, query driver data. 1203 * Call setup_dma to complete contoller and chan initilzation 1204 */ 1205static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, 1206 const struct pci_device_id *id) 1207{ 1208 struct middma_device *device; 1209 u32 base_addr, bar_size; 1210 struct intel_mid_dma_probe_info *info; 1211 int err; 1212 1213 pr_debug("MDMA: probe for %x\n", pdev->device); 1214 info = (void *)id->driver_data; 1215 pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n", 1216 info->max_chan, info->ch_base, 1217 info->block_size, info->pimr_mask); 1218 1219 err = pci_enable_device(pdev); 1220 if (err) 1221 goto err_enable_device; 1222 1223 err = pci_request_regions(pdev, "intel_mid_dmac"); 1224 if (err) 1225 goto err_request_regions; 1226 1227 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1228 if (err) 1229 goto err_set_dma_mask; 1230 1231 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1232 if (err) 1233 goto err_set_dma_mask; 1234 1235 device = kzalloc(sizeof(*device), GFP_KERNEL); 1236 if (!device) { 1237 pr_err("ERR_MDMA:kzalloc failed probe\n"); 1238 err = -ENOMEM; 1239 goto err_kzalloc; 1240 } 1241 device->pdev = pci_dev_get(pdev); 1242 1243 base_addr = pci_resource_start(pdev, 0); 1244 bar_size = pci_resource_len(pdev, 0); 1245 device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); 1246 if (!device->dma_base) { 1247 pr_err("ERR_MDMA:ioremap failed\n"); 1248 err = -ENOMEM; 1249 goto err_ioremap; 1250 } 1251 pci_set_drvdata(pdev, device); 1252 pci_set_master(pdev); 1253 device->max_chan = info->max_chan; 1254 device->chan_base = info->ch_base; 1255 device->block_size = info->block_size; 1256 device->pimr_mask = info->pimr_mask; 1257 1258 err = mid_setup_dma(pdev); 1259 if (err) 1260 goto err_dma; 1261 1262 pm_runtime_set_active(&pdev->dev); 1263 pm_runtime_enable(&pdev->dev); 1264 pm_runtime_allow(&pdev->dev); 1265 return 0; 1266 1267err_dma: 1268 iounmap(device->dma_base); 1269err_ioremap: 1270 pci_dev_put(pdev); 1271 kfree(device); 1272err_kzalloc: 1273err_set_dma_mask: 1274 pci_release_regions(pdev); 1275 pci_disable_device(pdev); 1276err_request_regions: 1277err_enable_device: 1278 pr_err("ERR_MDMA:Probe failed %d\n", err); 1279 return err; 1280} 1281 1282/** 1283 * intel_mid_dma_remove - PCI remove 1284 * @pdev: Controller PCI device structure 1285 * 1286 * Free up all resources and data 1287 * Call shutdown_dma to complete contoller and chan cleanup 1288 */ 1289static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) 1290{ 1291 struct middma_device *device = pci_get_drvdata(pdev); 1292 middma_shutdown(pdev); 1293 pci_dev_put(pdev); 1294 kfree(device); 1295 pci_release_regions(pdev); 1296 pci_disable_device(pdev); 1297} 1298 1299/* Power Management */ 1300/* 1301* dma_suspend - PCI suspend function 1302* 1303* @pci: PCI device structure 1304* @state: PM message 1305* 1306* This function is called by OS when a power event occurs 1307*/ 1308int dma_suspend(struct pci_dev *pci, pm_message_t state) 1309{ 1310 int i; 1311 struct middma_device *device = pci_get_drvdata(pci); 1312 pr_debug("MDMA: dma_suspend called\n"); 1313 1314 for (i = 0; i < device->max_chan; i++) { 1315 if (device->ch[i].in_use) 1316 return -EAGAIN; 1317 } 1318 device->state = SUSPENDED; 1319 pci_set_drvdata(pci, device); 1320 pci_save_state(pci); 1321 pci_disable_device(pci); 1322 pci_set_power_state(pci, PCI_D3hot); 1323 return 0; 1324} 1325 1326/** 1327* dma_resume - PCI resume function 1328* 1329* @pci: PCI device structure 1330* 1331* This function is called by OS when a power event occurs 1332*/ 1333int dma_resume(struct pci_dev *pci) 1334{ 1335 int ret; 1336 struct middma_device *device = pci_get_drvdata(pci); 1337 1338 pr_debug("MDMA: dma_resume called\n"); 1339 pci_set_power_state(pci, PCI_D0); 1340 pci_restore_state(pci); 1341 ret = pci_enable_device(pci); 1342 if (ret) { 1343 pr_err("MDMA: device cant be enabled for %x\n", pci->device); 1344 return ret; 1345 } 1346 device->state = RUNNING; 1347 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1348 pci_set_drvdata(pci, device); 1349 return 0; 1350} 1351 1352static int dma_runtime_suspend(struct device *dev) 1353{ 1354 struct pci_dev *pci_dev = to_pci_dev(dev); 1355 return dma_suspend(pci_dev, PMSG_SUSPEND); 1356} 1357 1358static int dma_runtime_resume(struct device *dev) 1359{ 1360 struct pci_dev *pci_dev = to_pci_dev(dev); 1361 return dma_resume(pci_dev); 1362} 1363 1364static int dma_runtime_idle(struct device *dev) 1365{ 1366 struct pci_dev *pdev = to_pci_dev(dev); 1367 struct middma_device *device = pci_get_drvdata(pdev); 1368 int i; 1369 1370 for (i = 0; i < device->max_chan; i++) { 1371 if (device->ch[i].in_use) 1372 return -EAGAIN; 1373 } 1374 1375 return pm_schedule_suspend(dev, 0); 1376} 1377 1378/****************************************************************************** 1379* PCI stuff 1380*/ 1381static struct pci_device_id intel_mid_dma_ids[] = { 1382 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, 1383 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1384 { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1385 { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, 1386 { 0, } 1387}; 1388MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); 1389 1390static const struct dev_pm_ops intel_mid_dma_pm = { 1391 .runtime_suspend = dma_runtime_suspend, 1392 .runtime_resume = dma_runtime_resume, 1393 .runtime_idle = dma_runtime_idle, 1394}; 1395 1396static struct pci_driver intel_mid_dma_pci = { 1397 .name = "Intel MID DMA", 1398 .id_table = intel_mid_dma_ids, 1399 .probe = intel_mid_dma_probe, 1400 .remove = __devexit_p(intel_mid_dma_remove), 1401#ifdef CONFIG_PM 1402 .suspend = dma_suspend, 1403 .resume = dma_resume, 1404 .driver = { 1405 .pm = &intel_mid_dma_pm, 1406 }, 1407#endif 1408}; 1409 1410static int __init intel_mid_dma_init(void) 1411{ 1412 pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", 1413 INTEL_MID_DMA_DRIVER_VERSION); 1414 return pci_register_driver(&intel_mid_dma_pci); 1415} 1416fs_initcall(intel_mid_dma_init); 1417 1418static void __exit intel_mid_dma_exit(void) 1419{ 1420 pci_unregister_driver(&intel_mid_dma_pci); 1421} 1422module_exit(intel_mid_dma_exit); 1423 1424MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); 1425MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); 1426MODULE_LICENSE("GPL v2"); 1427MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION); 1428