mpc512x_dma.c revision 2dc11581376829303b98eadb2de253bee065a56a
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory to
 * memory transfers are supported (tested using the dmatest module).
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_CHANNELS	64
#define MPC_DMA_TCD_OFFSET	0x1000

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hardware request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
};
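/*
 * Descriptor lifecycle (as implemented below): descriptors start on the
 * channel's "free" list, move to "prepared" in mpc_dma_prep_memcpy(), to
 * "queued" in mpc_dma_tx_submit(), to "active" when mpc_dma_execute()
 * pushes them to hardware, to "completed" from interrupt context, and
 * back to "free" once the tasklet has run their callbacks.
 */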
struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	dma_cookie_t			completed_cookie;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	uint				error_status;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	/* Move all queued descriptors to active list */
	list_splice_tail_init(&mchan->queued, &mchan->active);

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->start = 0;
	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
	out_8(&mdma->regs->dmassrt, cid);
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Ack interrupt on all channels */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	uint es;
	int i;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address"
								" Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = mchan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;

	mchan->chan.cookie = cookie;
	mdesc->desc.cookie = cookie;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev, "Memory allocation error. "
					"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	unsigned long flags;
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	spin_lock_irqsave(&mchan->lock, flags);
	last_used = mchan->chan.cookie;
	last_complete = mchan->completed_cookie;
	spin_unlock_irqrestore(&mchan->lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc)
		return NULL;

	mdesc->error = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (IS_ALIGNED(src | dst | len, 16)) {
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}

static int __devinit mpc_dma_probe(struct platform_device *op,
					const struct of_device_id *match)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		return retval;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		return -EBUSY;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		return -ENOMEM;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
									mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		return -EINVAL;
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->chancnt = MPC_DMA_CHANNELS;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		mchan->chan.chan_id = i;
		mchan->chan.cookie = 1;
		mchan->completed_cookie = mchan->chan.cookie;

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

	/* Disable hardware DMA requests */
	out_be32(&mdma->regs->dmaerqh, 0);
	out_be32(&mdma->regs->dmaerql, 0);

	/* Disable error interrupts */
	out_be32(&mdma->regs->dmaeeih, 0);
	out_be32(&mdma->regs->dmaeeil, 0);

	/* Clear interrupts status */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Route interrupts to IPIC */
	out_be32(&mdma->regs->dmaihsa, 0);
	out_be32(&mdma->regs->dmailsa, 0);

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval) {
		devm_free_irq(dev, mdma->irq, mdma);
		irq_dispose_mapping(mdma->irq);
	}

	return retval;
}

static int __devexit mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&mdma->dma);
	devm_free_irq(dev, mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}

static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{},
};

static struct of_platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= __devexit_p(mpc_dma_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= mpc_dma_match,
	},
};

static int __init mpc_dma_init(void)
{
	return of_register_platform_driver(&mpc_dma_driver);
}
module_init(mpc_dma_init);

static void __exit mpc_dma_exit(void)
{
	of_unregister_platform_driver(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
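#if 0	/* Example only -- not built as part of this driver. */
/*
 * A minimal sketch of how a generic dmaengine client might drive a memory
 * to memory copy through this engine (the dmatest module mentioned at the
 * top of the file exercises the same path). The helper name
 * mpc_dma_example_memcpy, the buffers and the busy-wait are illustrative
 * assumptions, not part of this driver; error handling is abbreviated.
 */
static int mpc_dma_example_memcpy(void *dst_buf, void *src_buf, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t src, dst;
	dma_cookie_t cookie;
	int ret = 0;

	/* Ask the dmaengine core for any channel capable of DMA_MEMCPY */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Map both buffers for DMA */
	src = dma_map_single(chan->device->dev, src_buf, len, DMA_TO_DEVICE);
	dst = dma_map_single(chan->device->dev, dst_buf, len, DMA_FROM_DEVICE);

	/* Prepare, submit and kick the transfer */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
								DMA_CTRL_ACK);
	if (!tx) {
		ret = -ENOMEM;
		goto unmap;
	}
	cookie = tx->tx_submit(tx);
	chan->device->device_issue_pending(chan);

	/* Busy-wait for completion; a real client would use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
							DMA_IN_PROGRESS)
		cpu_relax();

unmap:
	dma_unmap_single(chan->device->dev, dst, len, DMA_FROM_DEVICE);
	dma_unmap_single(chan->device->dev, src, len, DMA_TO_DEVICE);
	dma_release_channel(chan);
	return ret;
}
#endif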