/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC does not have a hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);

static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, shdev->chan_reg +
		     shdev->pdata->channel[sh_dc->id].chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;
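	/*
	 * DMAOR is shared by all channels of this controller: both this
	 * function and sh_dmae_rst() read-modify-write it, possibly from the
	 * error-IRQ or NMI path, so the updates are serialised by the global
	 * sh_dmae_lock.
	 */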
	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				chclr_write(sh_chan, 0);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->common.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	struct sh_dmae_slave *param = tx->chan->private;
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&sh_chan->desc_lock);

	if (list_empty(&sh_chan->ld_queue))
		power_up = true;
	else
		power_up = false;

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	if (power_up) {
		sh_chan->pm_state = DMAE_PM_BUSY;

		pm_runtime_get(sh_chan->dev);

		spin_unlock_irq(&sh_chan->desc_lock);

		pm_runtime_barrier(sh_chan->dev);

		spin_lock_irq(&sh_chan->desc_lock);

		/* Have we been reset, while waiting? */
		if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
			dev_dbg(sh_chan->dev, "Bring up channel %d\n",
				sh_chan->id);
			if (param) {
				const struct sh_dmae_slave_config *cfg =
					param->config;

				dmae_set_dmars(sh_chan, cfg->mid_rid);
				dmae_set_chcr(sh_chan, cfg->chcr);
			} else {
				dmae_init(sh_chan);
			}

			if (sh_chan->pm_state == DMAE_PM_PENDING)
				sh_chan_xfer_ld_queue(sh_chan);
			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
		}
	} else {
		sh_chan->pm_state = DMAE_PM_PENDING;
	}

	spin_unlock_irq(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;
	}

	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	chan->private = NULL;
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_irq(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_transfer_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;

			list_move(&desc->node, &sh_chan->ld_free);

			if (list_empty(&sh_chan->ld_queue)) {
				dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
				pm_runtime_put(sh_chan->dev);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->completed_cookie = sh_chan->common.cookie;

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

/* Called under spin_lock_irq(&sh_chan->desc_lock) */
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	/* DMA work check */
	if (dmae_is_busy(sh_chan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	spin_lock_irq(&sh_chan->desc_lock);
	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
		sh_chan_xfer_ld_queue(sh_chan);
	else
		sh_chan->pm_state = DMAE_PM_PENDING;
	spin_unlock_irq(&sh_chan->desc_lock);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;
	unsigned long flags;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read completed cookie to avoid a skew */
	last_complete = sh_chan->completed_cookie;
	rmb();
	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_irqsave(&sh_chan->desc_lock, flags);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}
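/*
 * Shared recovery path: the address-error interrupt (sh_dmae_err()) and the
 * NMI notifier both end up in sh_dmae_reset() below, which halts the
 * controller, completes every descriptor still queued on any channel
 * (running its callback, if any) and re-initialises DMAOR via sh_dmae_rst().
 */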
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
			pm_runtime_put(sh_chan->dev);
		}
		sh_chan->pm_state = DMAE_PM_ESTABLISHED;

		spin_unlock(&sh_chan->desc_lock);

		/* Complete all */
		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);

		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(data);
	return IRQ_HANDLED;
}

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock_irq(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_DEV_TO_MEM &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;

	/* reference struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Initialize the descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	shdev->common.dev = &pdev->dev;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAC_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	dma_async_device_register(&shdev->common);

	return err;
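	/*
	 * Error unwinding: the labels below undo the earlier probe steps in
	 * reverse order, so each failure point jumps to the label matching
	 * how far initialisation had progressed.
	 */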
chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	if (dmars)
		iounmap(shdev->dmars);

	platform_set_drvdata(pdev, NULL);
emapdmars:
	iounmap(shdev->chan_reg);
	synchronize_rcu();
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();
	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_dmae_slave *param = sh_chan->common.private;

		if (!sh_chan->descs_allocated)
			continue;

		if (param) {
			const struct sh_dmae_slave_config *cfg = param->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "sh-dma-engine",
		.pm	= &sh_dmae_pm,
	},
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");