/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <mach/dma.h>
#include <mach/ep93xx_spi.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)
#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @irq: IRQ number used by the driver
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, driver accepts transfer requests from protocol drivers.
 * @current_msg is used to hold pointer to the message that is currently
 * processed. If @current_msg is %NULL, it means that no processing is going
 * on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	int				irq;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in Hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}

static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readb(spi->regs_base + reg);
}

static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}

static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readw(spi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and places them in @chip->div_cpsr and @chip->div_scr. If,
 * for some reason, divisors cannot be calculated nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the requested rate is within the range supported by
	 * the controller. Note that the minimum value is already checked in
	 * ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be even number and starts from 2, scr can be any number
	 * between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
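
/*
 * For example, assuming a 14.7456 MHz SSP input clock (an illustrative
 * value, not taken from any particular board), a requested rate of 1 MHz
 * resolves to cpsr = 2 and scr = 7:
 *
 *	14745600 / (2 * (1 + 7)) = 921600 Hz
 *
 * which is the fastest available rate that does not exceed the request.
 */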

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
		dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	chip->dss = bits_per_word_to_dss(spi->bits_per_word);

	ep93xx_spi_cs_control(spi, false);
	return 0;
}
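
/*
 * A protocol driver reaches the setup hook above through the generic SPI
 * core, for example (hypothetical values):
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	err = spi_setup(spi);
 */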

/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to
 * transfer a new message. It simply puts the message in the queue and
 * schedules the workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->bits_per_word) {
			if (t->bits_per_word < 4 || t->bits_per_word > 16)
				return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * error in transfer and @msg->state is used to hold pointer to the
	 * current transfer (or %NULL if no active current transfer).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}

/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done so make sure that callers validate
 * settings before calling this.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip)
{
	u16 cr0;

	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= chip->dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}
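
/*
 * As an illustration (hypothetical settings): SPI_MODE_3 with 8 bits per
 * word and scr = 7 yields
 *
 *	cr0 = (7 << 8) | ((SPI_CPHA | SPI_CPOL) << 6) | 7 = 0x7c7
 *
 * i.e. SCR in bits 15:8, the clock mode bits in 7:6 and DSS in bits 3:0.
 */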

static inline int bits_per_word(const struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
}

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS if the current transfer is not yet complete, otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should
 * be full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}
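
/*
 * To illustrate the PIO path (assuming 8-bit frames and t->len = 24): the
 * first ep93xx_spi_read_write() call above primes all %SPI_FIFO_SIZE TX
 * FIFO slots and returns %-EINPROGRESS. Each subsequent call, made from
 * the interrupt handler, first drains whatever the RX FIFO holds and then
 * tops the TX FIFO back up, until espi->rx reaches 24 and the completion
 * is signalled.
 */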

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	enum dma_transfer_direction slave_dirn;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	if (bits_per_word(espi) > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
		slave_dirn = DMA_DEV_TO_MEM;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
		slave_dirn = DMA_MEM_TO_DEV;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
						 slave_dirn, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
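
/*
 * For instance, a 10000 byte transfer with 4096 byte pages is split into
 * nents = DIV_ROUND_UP(10000, 4096) = 3 chunks of 4096, 4096 and 1808
 * bytes. For a TX-only transfer all three RX chunks simply point at
 * @espi->zeropage.
 */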

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
				  enum dma_data_direction dir)
{
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
	ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
}

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which will fit
	 * into the FIFO and can be transferred with a single interrupt. So
	 * in these cases we use PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}
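
/*
 * The per-transfer overrides handled above are set by the protocol
 * driver, for example (hypothetical values, cmd being a caller's buffer):
 *
 *	struct spi_transfer t = {
 *		.tx_buf		= cmd,
 *		.len		= sizeof(cmd),
 *		.speed_hz	= 500000,
 *		.bits_per_word	= 12,
 *	};
 *
 * For this transfer alone the divisors and DSS are recalculated, and the
 * original chip settings are restored afterwards.
 */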

/**
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case
 * of failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Update SPI controller registers according to spi device and assert
	 * the chipselect.
	 */
	ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}

#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
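
/*
 * The filter below rejects the EP93xx M2P channels and takes the first
 * remaining slave channel; the matching ep93xx_dma_data (which names the
 * EP93XX_DMA_SSP port) is attached via chan->private so that the DMA
 * driver can bind the channel to the SSP.
 */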

static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}

static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int error;

	info = pdev->dev.platform_data;

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master) {
		dev_err(&pdev->dev, "failed to allocate spi master\n");
		return -ENOMEM;
	}

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;
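
	/*
	 * For example, assuming a 14.7456 MHz SSP clock (an illustrative
	 * value): max_rate = 14745600 / 2 = 7372800 Hz and
	 * min_rate = 14745600 / (254 * 256) = ~226 Hz, matching the
	 * smallest (cpsr = 2, scr = 0) and largest (cpsr = 254, scr = 255)
	 * divisor combinations.
	 */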

	espi->irq = platform_get_irq(pdev, 0);
	if (espi->irq < 0) {
		error = -EBUSY;
		dev_err(&pdev->dev, "failed to get irq resources\n");
		goto fail_put_clock;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		error = -ENODEV;
		goto fail_put_clock;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "unable to request iomem resources\n");
		error = -EBUSY;
		goto fail_put_clock;
	}

	espi->sspdr_phys = res->start + SSPDR;
	espi->regs_base = ioremap(res->start, resource_size(res));
	if (!espi->regs_base) {
		dev_err(&pdev->dev, "failed to map resources\n");
		error = -ENODEV;
		goto fail_free_mem;
	}

	error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
			    "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_unmap_regs;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		error = -ENOMEM;
		goto fail_free_dma;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, espi->irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_dma:
	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
fail_unmap_regs:
	iounmap(espi->regs_base);
fail_free_mem:
	release_mem_region(res->start, resource_size(res));
fail_put_clock:
	clk_put(espi->clk);
fail_release_master:
	spi_master_put(master);
	platform_set_drvdata(pdev, NULL);

	return error;
}

static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct resource *res;

	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
	iounmap(espi->regs_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= __devexit_p(ep93xx_spi_remove),
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");