fsldma.c revision f79abb627f033c85a6088231f20c85bc4a9bd757
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits the MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following mode:
		 * EOTIE - End-of-transfer interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
				32);
		break;
	}
}

static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

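/*
 * Illustrative sketch, not part of the original driver: how the setters
 * above combine to fill one hardware link descriptor for a hypothetical
 * 256-byte copy. The helper name and the __maybe_unused annotation are
 * assumptions for illustration only.
 */
static void __maybe_unused example_fill_ld(struct fsl_dma_chan *fsl_chan,
		struct fsl_desc_sw *desc, dma_addr_t src, dma_addr_t dest)
{
	set_desc_cnt(fsl_chan, &desc->hw, 256);		/* byte count */
	set_desc_src(fsl_chan, &desc->hw, src);		/* source + snoop attrs */
	set_desc_dest(fsl_chan, &desc->hw, dest);	/* dest + snoop attrs */
	set_desc_next(fsl_chan, &desc->hw, 0);		/* no next link yet */
}
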
static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i = 0;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	while (!dma_is_idle(fsl_chan) && (i++ < 100))
		udelay(10);
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
		64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link the new descriptor's physical address into the previous
	 * tail (the last link descriptor's next-link pointer) and enable
	 * the End-of-segment interrupt on that last descriptor.
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

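/*
 * Illustrative sketch, not part of the original driver: dma_start() and
 * dma_halt() above repeat a read-modify-write pattern on the mode
 * register. Factored into a hypothetical helper it would look like this:
 */
static void __maybe_unused example_mr_rmw(struct fsl_dma_chan *fsl_chan,
		u32 set, u32 clear)
{
	u32 mr = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

	mr = (mr & ~clear) | set;
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mr, 32);
}
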
/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA
 * transfers data from the source address (SA), it cycles through a
 * window of this size: with a loop size of 4, the DMA reads from
 * SA, SA + 1, SA + 2, SA + 3, then loops back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * transfers data to the destination address (TA), it cycles through a
 * window of this size: with a loop size of 4, the DMA writes to
 * TA, TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal
 * DREQ#. The pause control size is the number of bytes allowed to
 * transfer before the channel pauses; a new assertion of DREQ# then
 * resumes channel operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

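/*
 * Worked example (illustrative, not part of the original driver): for a
 * source loop of 4 bytes, __ilog2(4) == 2, so fsl_chan_set_src_loop_size()
 * ORs FSL_DMA_MR_SAHE | (2 << 14) into the mode register; the destination
 * field sits at bit 16 and the external-pause size at bits 24-27. The
 * helper below only restates that arithmetic for the source case.
 */
static u32 __maybe_unused example_mr_src_loop_field(int size)
{
	return FSL_DMA_MR_SAHE | (__ilog2(size) << 14);	/* e.g. 4 -> 2 << 14 */
}
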
/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately; the DMA channel waits for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	desc->async_tx.cookie = cookie;
	fsl_chan->common.cookie = desc->async_tx.cookie;

	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function creates a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	LIST_HEAD(tmp_list);

	/* The descriptor must be 32-byte aligned to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
				"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free the link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.ack = 0;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->async_tx.tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;
	LIST_HEAD(link_chain);

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate a link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			return NULL;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return first ? &first->async_tx : NULL;
}

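/*
 * Illustrative client flow (a sketch, not part of the original driver):
 * prepare a memcpy descriptor chain and submit it. DMA mapping, error
 * handling and completion waiting are omitted; after submission the
 * caller would invoke the device's issue_pending hook to start the
 * hardware. The helper name is hypothetical.
 */
static dma_cookie_t __maybe_unused example_issue_memcpy(struct dma_chan *chan,
		dma_addr_t dest, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = fsl_dma_prep_memcpy(chan, dest, src, len, 0);
	if (!tx)
		return -ENOMEM;

	/* tx_submit points at fsl_dma_tx_submit(); it assigns the cookie */
	return tx->tx_submit(tx);
}
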
/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: completed
 * descriptors are removed from the queue, their callbacks are run, and
 * the descriptors are returned to the DMA pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in the channel's ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	if (!dma_is_idle(fsl_chan))
		return;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred yet, we need to start the transfer.
	 */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next)
		;

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n",
				(void *)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

static void fsl_dma_dependency_added(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	fsl_chan_ld_cleanup(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

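/*
 * Illustrative sketch, not part of the original driver: a selftest-style
 * busy wait for a cookie to complete, built on fsl_dma_is_complete()
 * above. The helper name and the 1 ms polling interval are assumptions.
 */
static int __maybe_unused example_wait_complete(struct dma_chan *chan,
		dma_cookie_t cookie, unsigned int timeout_ms)
{
	while (timeout_ms--) {
		if (fsl_dma_is_complete(chan, cookie, NULL, NULL)
				== DMA_SUCCESS)
			return 0;
		msleep(1);
	}
	return -ETIMEDOUT;
}
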
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
			fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* The BCR register is 0: this is a DMA_INTERRUPT
			 * async_tx. Update the completed cookie and continue
			 * with the next uncompleted transfer.
			 */
			fsl_dma_update_completed_cookie(fsl_chan);
			fsl_chan_xfer_ld_queue(fsl_chan);
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If a link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		fsl_dma_update_completed_cookie(fsl_chan);
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		fsl_chan_xfer_ld_queue(fsl_chan);
	}

	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}

#ifdef FSL_DMA_CALLBACKTEST
static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
{
	if (fsl_chan)
		dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
}
#endif

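/*
 * Worked example (illustrative): in fsl_dma_do_interrupt() above, each
 * channel owns one byte of the 32-bit general status register, channel 0
 * in the most significant byte. If only channel 1 has raised an event,
 * e.g. gsr == 0x00800000, then ffs(gsr) == 24 and ch_nr == (32 - 24) / 8
 * == 1; for channel 0, gsr == 0x80000000 gives ffs(gsr) == 32 and
 * ch_nr == 0.
 */
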
#ifdef CONFIG_FSL_DMA_SELFTEST
static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
{
	struct dma_chan *chan;
	int err = 0;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	u8 *src, *dest;
	int i;
	size_t test_size;
	struct dma_async_tx_descriptor *tx1, *tx2, *tx3;

	test_size = 4096;

	src = kmalloc(test_size * 2, GFP_KERNEL);
	if (!src) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc memory for test!\n");
		err = -ENOMEM;
		goto out;
	}

	dest = src + test_size;

	for (i = 0; i < test_size; i++)
		src[i] = (u8) i;

	chan = &fsl_chan->common;

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
		goto out;
	}

	/* TX 1 */
	dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
				 DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
				  DMA_FROM_DEVICE);
	tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
	async_tx_ack(tx1);

	cookie = fsl_dma_tx_submit(tx1);
	fsl_dma_memcpy_issue_pending(chan);
	msleep(2);

	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* Test freeing and re-allocating channel resources */
	fsl_dma_free_chan_resources(chan);

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
		goto out;
	}

	/* Continue testing
	 * TX 2
	 */
	dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
				 test_size / 4, DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
				  test_size / 4, DMA_FROM_DEVICE);
	tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
	async_tx_ack(tx2);

	/* TX 3 */
	dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
				 test_size / 4, DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
				  test_size / 4, DMA_FROM_DEVICE);
	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
	async_tx_ack(tx3);

	/* Interrupt tx test */
	tx1 = fsl_dma_prep_interrupt(chan);
	async_tx_ack(tx1);
	cookie = fsl_dma_tx_submit(tx1);

	/* Test submitting the prepared txs out of order */
	cookie = fsl_dma_tx_submit(tx3);
	cookie = fsl_dma_tx_submit(tx2);

#ifdef FSL_DMA_CALLBACKTEST
	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
		tx3->callback = fsl_dma_callback_test;
		tx3->callback_param = fsl_chan;
	}
#endif
	fsl_dma_memcpy_issue_pending(chan);
	msleep(2);

	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto free_resources;
	}

	err = memcmp(src, dest, test_size);
	if (err) {
		/* Find the first mismatching byte (check bounds first) */
		for (i = 0; (i < test_size) && (*(src + i) == *(dest + i));
				i++)
			;
		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
				"error! src 0x%x, dest 0x%x\n",
				i, (long)test_size, *(src + i), *(dest + i));
	}

free_resources:
	fsl_dma_free_chan_resources(chan);
out:
	kfree(src);
	return err;
}
#endif

static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	struct fsl_dma_device *fdev;
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	fdev = dev_get_drvdata(dev->dev.parent);
	BUG_ON(!fdev);

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(&dev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		/* nothing mapped or requested yet; just free the channel */
		kfree(new_fsl_chan);
		return err;
	}

	new_fsl_chan->feature = *(u32 *)match->data;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature differs from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = &dev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(&dev->dev, "There is no %d channel!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear the cdar register */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx also supports the loop-size hooks */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(&dev->dev, "DMA channel %s request_irq error "
				"with return %d\n", dev->node->full_name, err);
			goto err;
		}
	}

#ifdef CONFIG_FSL_DMA_SELFTEST
	err = fsl_dma_self_test(new_fsl_chan);
	if (err)
		goto err;
#endif

	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
				match->compatible, new_fsl_chan->irq);

	return 0;
err:
	dma_halt(new_fsl_chan);
	iounmap(new_fsl_chan->reg_base);
	free_irq(new_fsl_chan->irq, new_fsl_chan);
	list_del(&new_fsl_chan->common.device_node);
	kfree(new_fsl_chan);
	return err;
}

const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;

static struct of_device_id of_fsl_dma_chan_ids[] = {
	{
		.compatible = "fsl,mpc8540-dma-channel",
		.data = (void *)&mpc8540_dma_ip_feature,
	},
	{
		.compatible = "fsl,mpc8349-dma-channel",
		.data = (void *)&mpc8349_dma_ip_feature,
	},
	{}
};

static struct of_platform_driver of_fsl_dma_chan_driver = {
	.name = "of-fsl-dma-channel",
	.match_table = of_fsl_dma_chan_ids,
	.probe = of_fsl_dma_chan_probe,
};

static __init int of_fsl_dma_chan_init(void)
{
	return of_register_platform_driver(&of_fsl_dma_chan_driver);
}

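/*
 * Illustrative device-tree sketch, not from the original sources: the kind
 * of nodes these match tables bind to. All addresses, sizes and interrupt
 * specifiers below are placeholders, not authoritative values; only the
 * channel offset math mirrors the id computation in
 * of_fsl_dma_chan_probe() above.
 *
 *	dma@21000 {
 *		compatible = "fsl,mpc8540-dma";
 *		reg = <0x21000 0x4>;
 *
 *		dma-channel@21100 {
 *			compatible = "fsl,mpc8540-dma-channel";
 *			reg = <0x21100 0x80>; // ((0x21100 - 0x100) & 0xfff) >> 7 == id 0
 *			interrupts = <20 2>;
 *		};
 *	};
 */
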
static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	unsigned int irq;
	struct fsl_dma_device *fdev;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get the DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at %p...\n",
			match->compatible, (void *)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_dependency_added = fsl_dma_dependency_added;
	fdev->common.dev = &dev->dev;

	irq = irq_of_parse_and_map(dev->node, 0);
	if (irq != NO_IRQ) {
		err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);
	of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
	kfree(fdev);
	return err;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,mpc8540-dma", },
	{ .compatible = "fsl,mpc8349-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "of-fsl-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
};

static __init int of_fsl_dma_init(void)
{
	return of_register_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_chan_init);
subsys_initcall(of_fsl_dma_init);