intel_mid_dma.c revision 45f0a85c8258741d11bda25c0a5669c06267204a
/*
 * intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 * Copyright (C) 2008-10 Intel Corp
 * Author: Vinod Koul <vinod.koul@intel.com>
 * The driver design is based on dw_dmac driver
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#include <linux/module.h>

#include "dmaengine.h"

#define MAX_CHAN	4 /* max ch across controllers */
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8

struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})

/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index	-	convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none need it)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;
	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}

/**
 * get_block_ts - calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width, calculate the DMA transaction length in data items;
 * return the data item count, or 0xFFFF if it exceeds the max block length
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}
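
/*
 * Worked example (illustrative only, not called anywhere): a 48-byte
 * buffer transferred at DMA_SLAVE_BUSWIDTH_2_BYTES gives byte_width = 2,
 * so block_ts = 48 / 2 = 24 data items. With the DMAC2 block_size of 2047
 * (see the PCI id table at the bottom of this file), a 16 KB buffer at
 * 4-byte width would give 4096 > 2047 and be clamped to 0xFFFF.
 */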

/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr - mask the peripheral interrupt
 * @mid: dma device for which masking is required
 *
 * Masks the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only;
 * the controller should have its peripheral mask registers already mapped.
 */
static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
	u32 pimr;

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
}

/**
 * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
 * @midc: dma channel for which unmasking is required
 *
 * Unmasks the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only;
 * the controller should have its peripheral mask registers already mapped.
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
}

/**
 * enable_dma_interrupt - enable the peripheral interrupt
 * @midc: dma channel for which interrupts are to be enabled
 *
 * Enable the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only;
 * the controller should have its peripheral mask registers already mapped.
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_periphral_intr(midc);

	/* en ch interrupts */
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
}

/**
 * disable_dma_interrupt - disable the peripheral interrupt
 * @midc: dma channel for which interrupts are to be disabled
 *
 * Disable the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only;
 * the controller should have its peripheral mask registers already mapped.
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/* Check LPE PISR, make sure fwd is disabled */
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
}
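
/*
 * A sketch of the write-enable encoding assumed by the helpers above (the
 * authoritative macros live in intel_mid_dma_regs.h): DW-DMAC-style mask
 * registers carry a per-channel write-enable bit in their upper half, so a
 * single 32-bit write updates one channel without a read-modify-write:
 *
 *	MASK_INTR_REG(ch):   1 << (ch + INT_MASK_WE)               -> mask ch
 *	UNMASK_INTR_REG(ch): (1 << ch) | (1 << (ch + INT_MASK_WE)) -> unmask ch
 */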

/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get - get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}

/**
 * midc_desc_put - put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get() back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}
/**
 * midc_dostart - begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel should be idle at this point */
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/* error */
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}
	midc->busy = true;
	/* write registers and en */
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}
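
/*
 * Descriptor lifecycle, as implemented by the helpers above: descriptors
 * are preallocated onto midc->free_list by alloc_chan_resources();
 * midc_desc_get() hands out the first ACKed one, the prep routines fill it
 * in, tx_submit() queues it on active_list (or queue), and completion or
 * terminate-all moves it back to free_list.
 */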

/**
 * midc_descriptor_complete - process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
		struct intel_mid_dma_desc *desc)
		__releases(&midc->lock) __acquires(&midc->lock)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli *llitem;
	void *param_txd = NULL;

	dma_cookie_complete(txd);
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	if (desc->lli != NULL) {
		/* clear the DONE bit of completed LLI in memory */
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	if (midc->raw_tfr) {
		desc->status = DMA_SUCCESS;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
			desc->lli = NULL;
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);

}
/**
 * midc_scan_descriptors - check the descriptors in channel
 *				mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/* tx is complete */
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
	}
}
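
/*
 * Design note: midc_descriptor_complete() drops midc->lock around the
 * client callback because callbacks commonly submit the next transfer,
 * which re-enters tx_submit() and would deadlock on the same lock if it
 * were still held.
 */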

/**
 * midc_lli_fill_sg - Helper function to convert
 *			SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/* Populate CTL_LOW and LLI values */
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
			/* Check for circular list, otherwise terminate LLI to ZERO */
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/* Populate CTL_HI values */
		ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
							desc->width,
							midc->dma->block_size);
		/* Populate SAR and DAR values */
		sg_phy_addr = sg_dma_address(sg);
		if (desc->dirn == DMA_MEM_TO_DEV) {
			lli_bloc_desc->sar = sg_phy_addr;
			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_DEV_TO_MEM) {
			lli_bloc_desc->sar = mids->dma_slave.src_addr;
			lli_bloc_desc->dar = sg_phy_addr;
		}
		/* Copy values into block descriptor in system memory */
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_bloc_desc++;
	}
	/* Copy very first LLI values to descriptor */
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit -	callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc	*desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&midc->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}

/**
 * intel_mid_dma_issue_pending -	callback to issue pending txn
 * @chan: chan where pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}
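
/*
 * Illustrative client-side view (a sketch using the generic dmaengine
 * helpers, none of which are defined in this file): after submitting and
 * issuing a transfer, a client can poll
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *
 * which ends up in intel_mid_dma_tx_status() below; a not-yet-complete
 * cookie triggers a descriptor scan before the status is re-read.
 */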

/**
 * intel_mid_dma_tx_status -	Return status of txn
 * @chan: chan where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&midc->lock);
		midc_scan_descriptors(to_middma_device(chan->device), midc);
		spin_unlock_bh(&midc->lock);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
/**
 * intel_mid_dma_device_control -	DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
	enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/* Suspend and disable the channel */
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
			desc->lli = NULL;
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}
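
/*
 * Illustrative client usage (a sketch only; chan, filter_fn, sgl, sg_len
 * and done_fn are hypothetical, and the intel_mid_dma_slave setup is
 * abbreviated). Note that dma_slave_control() above recovers the wrapping
 * intel_mid_dma_slave from the dma_slave_config it is passed, so the
 * config must be embedded in that structure:
 *
 *	struct intel_mid_dma_slave mids = { ... };
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter_fn, &mids);
 *	dmaengine_slave_config(chan, &mids.dma_slave);	// DMA_SLAVE_CONFIG
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = done_fn;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_all(chan);			// DMA_TERMINATE_ALL
 */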


/**
 * intel_mid_dma_prep_memcpy -	Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destination address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note: we support slave peripheral DMA transfers
 * only; the peripheral txn details should be filled in the slave structure
 * properly. Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/* calculate CFG_LO */
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/* calculate CFG_HI */
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/* SW HS only */
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /* default value */
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /* default value */
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/* calculate CTL_HI */
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/* calculate CTL_LO */
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	/*
	 * Here we need some translation from "enum dma_slave_buswidth"
	 * to the format for our dma controller
	 *		standard	intel_mid_dmac's format
	 *		1 Byte		0b000
	 *		2 Bytes		0b001
	 *		4 Bytes		0b010
	 */
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}
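
	/*
	 * Worked example of the buswidth translation a few lines up
	 * (illustrative): DMA_SLAVE_BUSWIDTH_2_BYTES == 2, so 2 / 2 = 1
	 * (0b001); DMA_SLAVE_BUSWIDTH_4_BYTES == 4, so 4 / 2 = 2 (0b010).
	 */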
pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", 684 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); 685 686 enable_dma_interrupt(midc); 687 688 desc = midc_desc_get(midc); 689 if (desc == NULL) 690 goto err_desc_get; 691 desc->sar = src; 692 desc->dar = dest ; 693 desc->len = len; 694 desc->cfg_hi = cfg_hi.cfg_hi; 695 desc->cfg_lo = cfg_lo.cfg_lo; 696 desc->ctl_lo = ctl_lo.ctl_lo; 697 desc->ctl_hi = ctl_hi.ctl_hi; 698 desc->width = width; 699 desc->dirn = mids->dma_slave.direction; 700 desc->lli_phys = 0; 701 desc->lli = NULL; 702 desc->lli_pool = NULL; 703 return &desc->txd; 704 705err_desc_get: 706 pr_err("ERR_MDMA: Failed to get desc\n"); 707 midc_desc_put(midc, desc); 708 return NULL; 709} 710/** 711 * intel_mid_dma_prep_slave_sg - Prep slave sg txn 712 * @chan: chan for DMA transfer 713 * @sgl: scatter gather list 714 * @sg_len: length of sg txn 715 * @direction: DMA transfer dirtn 716 * @flags: DMA flags 717 * @context: transfer context (ignored) 718 * 719 * Prepares LLI based periphral transfer 720 */ 721static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 722 struct dma_chan *chan, struct scatterlist *sgl, 723 unsigned int sg_len, enum dma_transfer_direction direction, 724 unsigned long flags, void *context) 725{ 726 struct intel_mid_dma_chan *midc = NULL; 727 struct intel_mid_dma_slave *mids = NULL; 728 struct intel_mid_dma_desc *desc = NULL; 729 struct dma_async_tx_descriptor *txd = NULL; 730 union intel_mid_dma_ctl_lo ctl_lo; 731 732 pr_debug("MDMA: Prep for slave SG\n"); 733 734 if (!sg_len) { 735 pr_err("MDMA: Invalid SG length\n"); 736 return NULL; 737 } 738 midc = to_intel_mid_dma_chan(chan); 739 BUG_ON(!midc); 740 741 mids = midc->mid_slave; 742 BUG_ON(!mids); 743 744 if (!midc->dma->pimr_mask) { 745 /* We can still handle sg list with only one item */ 746 if (sg_len == 1) { 747 txd = intel_mid_dma_prep_memcpy(chan, 748 mids->dma_slave.dst_addr, 749 mids->dma_slave.src_addr, 750 sg_dma_len(sgl), 751 flags); 752 return txd; 753 } else { 754 pr_warn("MDMA: SG list is not supported by this controller\n"); 755 return NULL; 756 } 757 } 758 759 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 760 sg_len, direction, flags); 761 762 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags); 763 if (NULL == txd) { 764 pr_err("MDMA: Prep memcpy failed\n"); 765 return NULL; 766 } 767 768 desc = to_intel_mid_dma_desc(txd); 769 desc->dirn = direction; 770 ctl_lo.ctl_lo = desc->ctl_lo; 771 ctl_lo.ctlx.llp_dst_en = 1; 772 ctl_lo.ctlx.llp_src_en = 1; 773 desc->ctl_lo = ctl_lo.ctl_lo; 774 desc->lli_length = sg_len; 775 desc->current_lli = 0; 776 /* DMA coherent memory pool for LLI descriptors*/ 777 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 778 midc->dma->pdev, 779 (sizeof(struct intel_mid_dma_lli)*sg_len), 780 32, 0); 781 if (NULL == desc->lli_pool) { 782 pr_err("MID_DMA:LLI pool create failed\n"); 783 return NULL; 784 } 785 786 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 787 if (!desc->lli) { 788 pr_err("MID_DMA: LLI alloc failed\n"); 789 pci_pool_destroy(desc->lli_pool); 790 return NULL; 791 } 792 793 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 794 if (flags & DMA_PREP_INTERRUPT) { 795 iowrite32(UNMASK_INTR_REG(midc->ch_id), 796 midc->dma_base + MASK_BLOCK); 797 pr_debug("MDMA:Enabled Block interrupt\n"); 798 } 799 return &desc->txd; 800} 801 802/** 803 * intel_mid_dma_free_chan_resources - Frees dma resources 804 * @chan: chan requiring attention 805 * 806 * Frees 

/**
 * intel_mid_dma_free_chan_resources -	Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;

	if (midc->busy)
		pr_err("ERR_MDMA: trying to free ch in use\n");

	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
	pm_runtime_put(&mid->pdev->dev);
}

/**
 * intel_mid_dma_alloc_chan_resources -	Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Return the descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc;
	dma_addr_t		phys;
	int	i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(&mid->pdev->dev)) {
			pr_err("ERR_MDMA: resume failed\n");
			return -EFAULT;
		}
	}

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/* ch is not idle */
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	dma_cookie_init(chan);

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
			/*check*/
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}

/**
 * midc_handle_error -	Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet -	DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device *mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/* txn interrupt */
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/* clear these interrupts first */
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block) {
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		}
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block) {
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
					mid->dma_base + MASK_BLOCK);
		}
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/* err interrupt */
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in tasklet1...\n");
	dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in tasklet2...\n");
	dma_tasklet(data);
}
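
/*
 * Interrupt flow, tying the tasklet above to the ISR below: the hard IRQ
 * handler only masks the interrupting channels and schedules the tasklet;
 * the tasklet then clears the raw TFR/BLOCK (or ERR) status, scans and
 * completes descriptors, and finally unmasks the channel interrupts.
 */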

/**
 * intel_mid_dma_interrupt -	DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt; if so, schedule the tasklet,
 * otherwise ignore
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 tfr_status, err_status;
	int call_tasklet = 0;

	tfr_status = ioread32(mid->dma_base + RAW_TFR);
	err_status = ioread32(mid->dma_base + RAW_ERR);
	if (!tfr_status && !err_status)
		return IRQ_NONE;

	/* DMA Interrupt */
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	tfr_status &= mid->intr_mask;
	if (tfr_status) {
		/* need to disable intr */
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		call_tasklet = 1;
	}
	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32((err_status << INT_MASK_WE),
			  mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma -	Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registers with DMA engine,
 * ISR. Initialize DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space!!\n");
			err = -ENOMEM;
			goto err_ioremap;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
	/* init CH structures */
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		dma_cookie_init(&midch->chan);
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/* mask interrupts */
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/* init dma structure */
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;
	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/* enable dma cntrl */
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/* register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/* register device w/ engine */
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	if (dma->mask_reg)
		iounmap(dma->mask_reg);
err_ioremap:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;

}

/**
 * middma_shutdown -	Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister DMA controller, clear all structures and free interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
}

/**
 * intel_mid_dma_probe -	PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call mid_setup_dma() to complete controller and channel initialization
 */
static int intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove -	PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call middma_shutdown() to complete controller and channel cleanup
 */
static void intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Power Management */
/*
 * dma_suspend - PCI suspend function
 * @dev: device structure
 *
 * This function is called by OS when a power event occurs
 */
static int dma_suspend(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	int i;
	struct middma_device *device = pci_get_drvdata(pci);
	pr_debug("MDMA: dma_suspend called\n");

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
	dmac1_mask_periphral_intr(device);
	device->state = SUSPENDED;
	pci_save_state(pci);
	pci_disable_device(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/**
 * dma_resume - PCI resume function
 * @dev: device structure
 *
 * This function is called by OS when a power event occurs
 */
int dma_resume(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	int ret;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_resume called\n");
	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
		return ret;
	}
	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = SUSPENDED;
	return 0;
}

static int dma_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}

	return 0;
}

/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
	.suspend = dma_suspend,
	.resume = dma_resume,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		=	"Intel MID DMA",
	.id_table	=	intel_mid_dma_ids,
	.probe		=	intel_mid_dma_probe,
	.remove		=	intel_mid_dma_remove,
#ifdef CONFIG_PM
	.driver = {
		.pm = &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);