/* mmci.c — snapshot at revision c8ebae37034c0ead62eb4df8ef88e999ddb8d5cf */
1/* 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 3 * 4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. 5 * Copyright (C) 2010 ST-Ericsson SA 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11#include <linux/module.h> 12#include <linux/moduleparam.h> 13#include <linux/init.h> 14#include <linux/ioport.h> 15#include <linux/device.h> 16#include <linux/interrupt.h> 17#include <linux/kernel.h> 18#include <linux/delay.h> 19#include <linux/err.h> 20#include <linux/highmem.h> 21#include <linux/log2.h> 22#include <linux/mmc/host.h> 23#include <linux/mmc/card.h> 24#include <linux/amba/bus.h> 25#include <linux/clk.h> 26#include <linux/scatterlist.h> 27#include <linux/gpio.h> 28#include <linux/regulator/consumer.h> 29#include <linux/dmaengine.h> 30#include <linux/dma-mapping.h> 31#include <linux/amba/mmci.h> 32 33#include <asm/div64.h> 34#include <asm/io.h> 35#include <asm/sizes.h> 36 37#include "mmci.h" 38 39#define DRIVER_NAME "mmci-pl18x" 40 41static unsigned int fmax = 515633; 42 43/** 44 * struct variant_data - MMCI variant-specific quirks 45 * @clkreg: default value for MCICLOCK register 46 * @clkreg_enable: enable value for MMCICLOCK register 47 * @datalength_bits: number of bits in the MMCIDATALENGTH register 48 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY 49 * is asserted (likewise for RX) 50 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY 51 * is asserted (likewise for RX) 52 * @sdio: variant supports SDIO 53 * @st_clkdiv: true if using a ST-specific clock divider algorithm 54 */ 55struct variant_data { 56 unsigned int clkreg; 57 unsigned int clkreg_enable; 58 unsigned int datalength_bits; 59 unsigned int fifosize; 60 unsigned int fifohalfsize; 61 bool sdio; 62 bool st_clkdiv; 63}; 64 65static struct variant_data 
variant_arm = { 66 .fifosize = 16 * 4, 67 .fifohalfsize = 8 * 4, 68 .datalength_bits = 16, 69}; 70 71static struct variant_data variant_u300 = { 72 .fifosize = 16 * 4, 73 .fifohalfsize = 8 * 4, 74 .clkreg_enable = 1 << 13, /* HWFCEN */ 75 .datalength_bits = 16, 76 .sdio = true, 77}; 78 79static struct variant_data variant_ux500 = { 80 .fifosize = 30 * 4, 81 .fifohalfsize = 8 * 4, 82 .clkreg = MCI_CLK_ENABLE, 83 .clkreg_enable = 1 << 14, /* HWFCEN */ 84 .datalength_bits = 24, 85 .sdio = true, 86 .st_clkdiv = true, 87}; 88 89/* 90 * This must be called with host->lock held 91 */ 92static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) 93{ 94 struct variant_data *variant = host->variant; 95 u32 clk = variant->clkreg; 96 97 if (desired) { 98 if (desired >= host->mclk) { 99 clk = MCI_CLK_BYPASS; 100 host->cclk = host->mclk; 101 } else if (variant->st_clkdiv) { 102 /* 103 * DB8500 TRM says f = mclk / (clkdiv + 2) 104 * => clkdiv = (mclk / f) - 2 105 * Round the divider up so we don't exceed the max 106 * frequency 107 */ 108 clk = DIV_ROUND_UP(host->mclk, desired) - 2; 109 if (clk >= 256) 110 clk = 255; 111 host->cclk = host->mclk / (clk + 2); 112 } else { 113 /* 114 * PL180 TRM says f = mclk / (2 * (clkdiv + 1)) 115 * => clkdiv = mclk / (2 * f) - 1 116 */ 117 clk = host->mclk / (2 * desired) - 1; 118 if (clk >= 256) 119 clk = 255; 120 host->cclk = host->mclk / (2 * (clk + 1)); 121 } 122 123 clk |= variant->clkreg_enable; 124 clk |= MCI_CLK_ENABLE; 125 /* This hasn't proven to be worthwhile */ 126 /* clk |= MCI_CLK_PWRSAVE; */ 127 } 128 129 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) 130 clk |= MCI_4BIT_BUS; 131 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) 132 clk |= MCI_ST_8BIT_BUS; 133 134 writel(clk, host->base + MMCICLOCK); 135} 136 137static void 138mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) 139{ 140 writel(0, host->base + MMCICOMMAND); 141 142 BUG_ON(host->data); 143 144 host->mrq = NULL; 145 host->cmd = NULL; 146 
147 /* 148 * Need to drop the host lock here; mmc_request_done may call 149 * back into the driver... 150 */ 151 spin_unlock(&host->lock); 152 mmc_request_done(host->mmc, mrq); 153 spin_lock(&host->lock); 154} 155 156static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) 157{ 158 void __iomem *base = host->base; 159 160 if (host->singleirq) { 161 unsigned int mask0 = readl(base + MMCIMASK0); 162 163 mask0 &= ~MCI_IRQ1MASK; 164 mask0 |= mask; 165 166 writel(mask0, base + MMCIMASK0); 167 } 168 169 writel(mask, base + MMCIMASK1); 170} 171 172static void mmci_stop_data(struct mmci_host *host) 173{ 174 writel(0, host->base + MMCIDATACTRL); 175 mmci_set_mask1(host, 0); 176 host->data = NULL; 177} 178 179static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) 180{ 181 unsigned int flags = SG_MITER_ATOMIC; 182 183 if (data->flags & MMC_DATA_READ) 184 flags |= SG_MITER_TO_SG; 185 else 186 flags |= SG_MITER_FROM_SG; 187 188 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 189} 190 191/* 192 * All the DMA operation mode stuff goes inside this ifdef. 193 * This assumes that you have a generic DMA device interface, 194 * no custom DMA interfaces are supported. 195 */ 196#ifdef CONFIG_DMA_ENGINE 197static void __devinit mmci_dma_setup(struct mmci_host *host) 198{ 199 struct mmci_platform_data *plat = host->plat; 200 const char *rxname, *txname; 201 dma_cap_mask_t mask; 202 203 if (!plat || !plat->dma_filter) { 204 dev_info(mmc_dev(host->mmc), "no DMA platform data\n"); 205 return; 206 } 207 208 /* Try to acquire a generic DMA engine slave channel */ 209 dma_cap_zero(mask); 210 dma_cap_set(DMA_SLAVE, mask); 211 212 /* 213 * If only an RX channel is specified, the driver will 214 * attempt to use it bidirectionally, however if it is 215 * is specified but cannot be located, DMA will be disabled. 
216 */ 217 if (plat->dma_rx_param) { 218 host->dma_rx_channel = dma_request_channel(mask, 219 plat->dma_filter, 220 plat->dma_rx_param); 221 /* E.g if no DMA hardware is present */ 222 if (!host->dma_rx_channel) 223 dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); 224 } 225 226 if (plat->dma_tx_param) { 227 host->dma_tx_channel = dma_request_channel(mask, 228 plat->dma_filter, 229 plat->dma_tx_param); 230 if (!host->dma_tx_channel) 231 dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); 232 } else { 233 host->dma_tx_channel = host->dma_rx_channel; 234 } 235 236 if (host->dma_rx_channel) 237 rxname = dma_chan_name(host->dma_rx_channel); 238 else 239 rxname = "none"; 240 241 if (host->dma_tx_channel) 242 txname = dma_chan_name(host->dma_tx_channel); 243 else 244 txname = "none"; 245 246 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", 247 rxname, txname); 248 249 /* 250 * Limit the maximum segment size in any SG entry according to 251 * the parameters of the DMA engine device. 252 */ 253 if (host->dma_tx_channel) { 254 struct device *dev = host->dma_tx_channel->device->dev; 255 unsigned int max_seg_size = dma_get_max_seg_size(dev); 256 257 if (max_seg_size < host->mmc->max_seg_size) 258 host->mmc->max_seg_size = max_seg_size; 259 } 260 if (host->dma_rx_channel) { 261 struct device *dev = host->dma_rx_channel->device->dev; 262 unsigned int max_seg_size = dma_get_max_seg_size(dev); 263 264 if (max_seg_size < host->mmc->max_seg_size) 265 host->mmc->max_seg_size = max_seg_size; 266 } 267} 268 269/* 270 * This is used in __devinit or __devexit so inline it 271 * so it can be discarded. 
272 */ 273static inline void mmci_dma_release(struct mmci_host *host) 274{ 275 struct mmci_platform_data *plat = host->plat; 276 277 if (host->dma_rx_channel) 278 dma_release_channel(host->dma_rx_channel); 279 if (host->dma_tx_channel && plat->dma_tx_param) 280 dma_release_channel(host->dma_tx_channel); 281 host->dma_rx_channel = host->dma_tx_channel = NULL; 282} 283 284static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 285{ 286 struct dma_chan *chan = host->dma_current; 287 enum dma_data_direction dir; 288 u32 status; 289 int i; 290 291 /* Wait up to 1ms for the DMA to complete */ 292 for (i = 0; ; i++) { 293 status = readl(host->base + MMCISTATUS); 294 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) 295 break; 296 udelay(10); 297 } 298 299 /* 300 * Check to see whether we still have some data left in the FIFO - 301 * this catches DMA controllers which are unable to monitor the 302 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- 303 * contiguous buffers. On TX, we'll get a FIFO underrun error. 304 */ 305 if (status & MCI_RXDATAAVLBLMASK) { 306 dmaengine_terminate_all(chan); 307 if (!data->error) 308 data->error = -EIO; 309 } 310 311 if (data->flags & MMC_DATA_WRITE) { 312 dir = DMA_TO_DEVICE; 313 } else { 314 dir = DMA_FROM_DEVICE; 315 } 316 317 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 318 319 /* 320 * Use of DMA with scatter-gather is impossible. 321 * Give up with DMA and switch back to PIO mode. 322 */ 323 if (status & MCI_RXDATAAVLBLMASK) { 324 dev_err(mmc_dev(host->mmc), "buggy DMA detected. 
Taking evasive action.\n"); 325 mmci_dma_release(host); 326 } 327} 328 329static void mmci_dma_data_error(struct mmci_host *host) 330{ 331 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); 332 dmaengine_terminate_all(host->dma_current); 333} 334 335static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 336{ 337 struct variant_data *variant = host->variant; 338 struct dma_slave_config conf = { 339 .src_addr = host->phybase + MMCIFIFO, 340 .dst_addr = host->phybase + MMCIFIFO, 341 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 342 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 343 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 344 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 345 }; 346 struct mmc_data *data = host->data; 347 struct dma_chan *chan; 348 struct dma_device *device; 349 struct dma_async_tx_descriptor *desc; 350 int nr_sg; 351 352 host->dma_current = NULL; 353 354 if (data->flags & MMC_DATA_READ) { 355 conf.direction = DMA_FROM_DEVICE; 356 chan = host->dma_rx_channel; 357 } else { 358 conf.direction = DMA_TO_DEVICE; 359 chan = host->dma_tx_channel; 360 } 361 362 /* If there's no DMA channel, fall back to PIO */ 363 if (!chan) 364 return -EINVAL; 365 366 /* If less than or equal to the fifo size, don't bother with DMA */ 367 if (host->size <= variant->fifosize) 368 return -EINVAL; 369 370 device = chan->device; 371 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction); 372 if (nr_sg == 0) 373 return -EINVAL; 374 375 dmaengine_slave_config(chan, &conf); 376 desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, 377 conf.direction, DMA_CTRL_ACK); 378 if (!desc) 379 goto unmap_exit; 380 381 /* Okay, go for it. 
*/ 382 host->dma_current = chan; 383 384 dev_vdbg(mmc_dev(host->mmc), 385 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", 386 data->sg_len, data->blksz, data->blocks, data->flags); 387 dmaengine_submit(desc); 388 dma_async_issue_pending(chan); 389 390 datactrl |= MCI_DPSM_DMAENABLE; 391 392 /* Trigger the DMA transfer */ 393 writel(datactrl, host->base + MMCIDATACTRL); 394 395 /* 396 * Let the MMCI say when the data is ended and it's time 397 * to fire next DMA request. When that happens, MMCI will 398 * call mmci_data_end() 399 */ 400 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, 401 host->base + MMCIMASK0); 402 return 0; 403 404unmap_exit: 405 dmaengine_terminate_all(chan); 406 dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); 407 return -ENOMEM; 408} 409#else 410/* Blank functions if the DMA engine is not available */ 411static inline void mmci_dma_setup(struct mmci_host *host) 412{ 413} 414 415static inline void mmci_dma_release(struct mmci_host *host) 416{ 417} 418 419static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 420{ 421} 422 423static inline void mmci_dma_data_error(struct mmci_host *host) 424{ 425} 426 427static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 428{ 429 return -ENOSYS; 430} 431#endif 432 433static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 434{ 435 struct variant_data *variant = host->variant; 436 unsigned int datactrl, timeout, irqmask; 437 unsigned long long clks; 438 void __iomem *base; 439 int blksz_bits; 440 441 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", 442 data->blksz, data->blocks, data->flags); 443 444 host->data = data; 445 host->size = data->blksz * data->blocks; 446 data->bytes_xfered = 0; 447 448 clks = (unsigned long long)data->timeout_ns * host->cclk; 449 do_div(clks, 1000000000UL); 450 451 timeout = data->timeout_clks + (unsigned int)clks; 452 453 base = host->base; 454 
writel(timeout, base + MMCIDATATIMER); 455 writel(host->size, base + MMCIDATALENGTH); 456 457 blksz_bits = ffs(data->blksz) - 1; 458 BUG_ON(1 << blksz_bits != data->blksz); 459 460 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 461 462 if (data->flags & MMC_DATA_READ) 463 datactrl |= MCI_DPSM_DIRECTION; 464 465 /* 466 * Attempt to use DMA operation mode, if this 467 * should fail, fall back to PIO mode 468 */ 469 if (!mmci_dma_start_data(host, datactrl)) 470 return; 471 472 /* IRQ mode, map the SG list for CPU reading/writing */ 473 mmci_init_sg(host, data); 474 475 if (data->flags & MMC_DATA_READ) { 476 irqmask = MCI_RXFIFOHALFFULLMASK; 477 478 /* 479 * If we have less than the fifo 'half-full' threshold to 480 * transfer, trigger a PIO interrupt as soon as any data 481 * is available. 482 */ 483 if (host->size < variant->fifohalfsize) 484 irqmask |= MCI_RXDATAAVLBLMASK; 485 } else { 486 /* 487 * We don't actually need to include "FIFO empty" here 488 * since its implicit in "FIFO half empty". 
489 */ 490 irqmask = MCI_TXFIFOHALFEMPTYMASK; 491 } 492 493 /* The ST Micro variants has a special bit to enable SDIO */ 494 if (variant->sdio && host->mmc->card) 495 if (mmc_card_sdio(host->mmc->card)) 496 datactrl |= MCI_ST_DPSM_SDIOEN; 497 498 writel(datactrl, base + MMCIDATACTRL); 499 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 500 mmci_set_mask1(host, irqmask); 501} 502 503static void 504mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 505{ 506 void __iomem *base = host->base; 507 508 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 509 cmd->opcode, cmd->arg, cmd->flags); 510 511 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 512 writel(0, base + MMCICOMMAND); 513 udelay(1); 514 } 515 516 c |= cmd->opcode | MCI_CPSM_ENABLE; 517 if (cmd->flags & MMC_RSP_PRESENT) { 518 if (cmd->flags & MMC_RSP_136) 519 c |= MCI_CPSM_LONGRSP; 520 c |= MCI_CPSM_RESPONSE; 521 } 522 if (/*interrupt*/0) 523 c |= MCI_CPSM_INTERRUPT; 524 525 host->cmd = cmd; 526 527 writel(cmd->arg, base + MMCIARGUMENT); 528 writel(c, base + MMCICOMMAND); 529} 530 531static void 532mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 533 unsigned int status) 534{ 535 /* First check for errors */ 536 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 537 u32 remain, success; 538 539 /* Terminate the DMA transfer */ 540 if (dma_inprogress(host)) 541 mmci_dma_data_error(host); 542 543 /* 544 * Calculate how far we are into the transfer. Note that 545 * the data counter gives the number of bytes transferred 546 * on the MMC bus, not on the host side. On reads, this 547 * can be as much as a FIFO-worth of data ahead. This 548 * matters for FIFO overruns only. 
549 */ 550 remain = readl(host->base + MMCIDATACNT); 551 success = data->blksz * data->blocks - remain; 552 553 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 554 status, success); 555 if (status & MCI_DATACRCFAIL) { 556 /* Last block was not successful */ 557 success -= 1; 558 data->error = -EILSEQ; 559 } else if (status & MCI_DATATIMEOUT) { 560 data->error = -ETIMEDOUT; 561 } else if (status & MCI_TXUNDERRUN) { 562 data->error = -EIO; 563 } else if (status & MCI_RXOVERRUN) { 564 if (success > host->variant->fifosize) 565 success -= host->variant->fifosize; 566 else 567 success = 0; 568 data->error = -EIO; 569 } 570 data->bytes_xfered = round_down(success, data->blksz); 571 } 572 573 if (status & MCI_DATABLOCKEND) 574 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 575 576 if (status & MCI_DATAEND || data->error) { 577 if (dma_inprogress(host)) 578 mmci_dma_unmap(host, data); 579 mmci_stop_data(host); 580 581 if (!data->error) 582 /* The error clause is handled above, success! 
*/ 583 data->bytes_xfered = data->blksz * data->blocks; 584 585 if (!data->stop) { 586 mmci_request_end(host, data->mrq); 587 } else { 588 mmci_start_command(host, data->stop, 0); 589 } 590 } 591} 592 593static void 594mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 595 unsigned int status) 596{ 597 void __iomem *base = host->base; 598 599 host->cmd = NULL; 600 601 if (status & MCI_CMDTIMEOUT) { 602 cmd->error = -ETIMEDOUT; 603 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 604 cmd->error = -EILSEQ; 605 } else { 606 cmd->resp[0] = readl(base + MMCIRESPONSE0); 607 cmd->resp[1] = readl(base + MMCIRESPONSE1); 608 cmd->resp[2] = readl(base + MMCIRESPONSE2); 609 cmd->resp[3] = readl(base + MMCIRESPONSE3); 610 } 611 612 if (!cmd->data || cmd->error) { 613 if (host->data) 614 mmci_stop_data(host); 615 mmci_request_end(host, cmd->mrq); 616 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 617 mmci_start_data(host, cmd->data); 618 } 619} 620 621static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 622{ 623 void __iomem *base = host->base; 624 char *ptr = buffer; 625 u32 status; 626 int host_remain = host->size; 627 628 do { 629 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); 630 631 if (count > remain) 632 count = remain; 633 634 if (count <= 0) 635 break; 636 637 readsl(base + MMCIFIFO, ptr, count >> 2); 638 639 ptr += count; 640 remain -= count; 641 host_remain -= count; 642 643 if (remain == 0) 644 break; 645 646 status = readl(base + MMCISTATUS); 647 } while (status & MCI_RXDATAAVLBL); 648 649 return ptr - buffer; 650} 651 652static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 653{ 654 struct variant_data *variant = host->variant; 655 void __iomem *base = host->base; 656 char *ptr = buffer; 657 658 do { 659 unsigned int count, maxcnt; 660 661 maxcnt = status & MCI_TXFIFOEMPTY ? 
662 variant->fifosize : variant->fifohalfsize; 663 count = min(remain, maxcnt); 664 665 /* 666 * The ST Micro variant for SDIO transfer sizes 667 * less then 8 bytes should have clock H/W flow 668 * control disabled. 669 */ 670 if (variant->sdio && 671 mmc_card_sdio(host->mmc->card)) { 672 if (count < 8) 673 writel(readl(host->base + MMCICLOCK) & 674 ~variant->clkreg_enable, 675 host->base + MMCICLOCK); 676 else 677 writel(readl(host->base + MMCICLOCK) | 678 variant->clkreg_enable, 679 host->base + MMCICLOCK); 680 } 681 682 /* 683 * SDIO especially may want to send something that is 684 * not divisible by 4 (as opposed to card sectors 685 * etc), and the FIFO only accept full 32-bit writes. 686 * So compensate by adding +3 on the count, a single 687 * byte become a 32bit write, 7 bytes will be two 688 * 32bit writes etc. 689 */ 690 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); 691 692 ptr += count; 693 remain -= count; 694 695 if (remain == 0) 696 break; 697 698 status = readl(base + MMCISTATUS); 699 } while (status & MCI_TXFIFOHALFEMPTY); 700 701 return ptr - buffer; 702} 703 704/* 705 * PIO data transfer IRQ handler. 706 */ 707static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 708{ 709 struct mmci_host *host = dev_id; 710 struct sg_mapping_iter *sg_miter = &host->sg_miter; 711 struct variant_data *variant = host->variant; 712 void __iomem *base = host->base; 713 unsigned long flags; 714 u32 status; 715 716 status = readl(base + MMCISTATUS); 717 718 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 719 720 local_irq_save(flags); 721 722 do { 723 unsigned int remain, len; 724 char *buffer; 725 726 /* 727 * For write, we only need to test the half-empty flag 728 * here - if the FIFO is completely empty, then by 729 * definition it is more than half empty. 730 * 731 * For read, check for data available. 
732 */ 733 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 734 break; 735 736 if (!sg_miter_next(sg_miter)) 737 break; 738 739 buffer = sg_miter->addr; 740 remain = sg_miter->length; 741 742 len = 0; 743 if (status & MCI_RXACTIVE) 744 len = mmci_pio_read(host, buffer, remain); 745 if (status & MCI_TXACTIVE) 746 len = mmci_pio_write(host, buffer, remain, status); 747 748 sg_miter->consumed = len; 749 750 host->size -= len; 751 remain -= len; 752 753 if (remain) 754 break; 755 756 status = readl(base + MMCISTATUS); 757 } while (1); 758 759 sg_miter_stop(sg_miter); 760 761 local_irq_restore(flags); 762 763 /* 764 * If we have less than the fifo 'half-full' threshold to transfer, 765 * trigger a PIO interrupt as soon as any data is available. 766 */ 767 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 768 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 769 770 /* 771 * If we run out of data, disable the data IRQs; this 772 * prevents a race where the FIFO becomes empty before 773 * the chip itself has disabled the data path, and 774 * stops us racing with our data end IRQ. 775 */ 776 if (host->size == 0) { 777 mmci_set_mask1(host, 0); 778 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 779 } 780 781 return IRQ_HANDLED; 782} 783 784/* 785 * Handle completion of command and data transfers. 
786 */ 787static irqreturn_t mmci_irq(int irq, void *dev_id) 788{ 789 struct mmci_host *host = dev_id; 790 u32 status; 791 int ret = 0; 792 793 spin_lock(&host->lock); 794 795 do { 796 struct mmc_command *cmd; 797 struct mmc_data *data; 798 799 status = readl(host->base + MMCISTATUS); 800 801 if (host->singleirq) { 802 if (status & readl(host->base + MMCIMASK1)) 803 mmci_pio_irq(irq, dev_id); 804 805 status &= ~MCI_IRQ1MASK; 806 } 807 808 status &= readl(host->base + MMCIMASK0); 809 writel(status, host->base + MMCICLEAR); 810 811 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 812 813 data = host->data; 814 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| 815 MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data) 816 mmci_data_irq(host, data, status); 817 818 cmd = host->cmd; 819 if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd) 820 mmci_cmd_irq(host, cmd, status); 821 822 ret = 1; 823 } while (status); 824 825 spin_unlock(&host->lock); 826 827 return IRQ_RETVAL(ret); 828} 829 830static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 831{ 832 struct mmci_host *host = mmc_priv(mmc); 833 unsigned long flags; 834 835 WARN_ON(host->mrq != NULL); 836 837 if (mrq->data && !is_power_of_2(mrq->data->blksz)) { 838 dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", 839 mrq->data->blksz); 840 mrq->cmd->error = -EINVAL; 841 mmc_request_done(mmc, mrq); 842 return; 843 } 844 845 spin_lock_irqsave(&host->lock, flags); 846 847 host->mrq = mrq; 848 849 if (mrq->data && mrq->data->flags & MMC_DATA_READ) 850 mmci_start_data(host, mrq->data); 851 852 mmci_start_command(host, mrq->cmd, 0); 853 854 spin_unlock_irqrestore(&host->lock, flags); 855} 856 857static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 858{ 859 struct mmci_host *host = mmc_priv(mmc); 860 u32 pwr = 0; 861 unsigned long flags; 862 int ret; 863 864 switch (ios->power_mode) { 865 case MMC_POWER_OFF: 866 if (host->vcc) 867 
ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); 868 break; 869 case MMC_POWER_UP: 870 if (host->vcc) { 871 ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); 872 if (ret) { 873 dev_err(mmc_dev(mmc), "unable to set OCR\n"); 874 /* 875 * The .set_ios() function in the mmc_host_ops 876 * struct return void, and failing to set the 877 * power should be rare so we print an error 878 * and return here. 879 */ 880 return; 881 } 882 } 883 if (host->plat->vdd_handler) 884 pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, 885 ios->power_mode); 886 /* The ST version does not have this, fall through to POWER_ON */ 887 if (host->hw_designer != AMBA_VENDOR_ST) { 888 pwr |= MCI_PWR_UP; 889 break; 890 } 891 case MMC_POWER_ON: 892 pwr |= MCI_PWR_ON; 893 break; 894 } 895 896 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 897 if (host->hw_designer != AMBA_VENDOR_ST) 898 pwr |= MCI_ROD; 899 else { 900 /* 901 * The ST Micro variant use the ROD bit for something 902 * else and only has OD (Open Drain). 
903 */ 904 pwr |= MCI_OD; 905 } 906 } 907 908 spin_lock_irqsave(&host->lock, flags); 909 910 mmci_set_clkreg(host, ios->clock); 911 912 if (host->pwr != pwr) { 913 host->pwr = pwr; 914 writel(pwr, host->base + MMCIPOWER); 915 } 916 917 spin_unlock_irqrestore(&host->lock, flags); 918} 919 920static int mmci_get_ro(struct mmc_host *mmc) 921{ 922 struct mmci_host *host = mmc_priv(mmc); 923 924 if (host->gpio_wp == -ENOSYS) 925 return -ENOSYS; 926 927 return gpio_get_value_cansleep(host->gpio_wp); 928} 929 930static int mmci_get_cd(struct mmc_host *mmc) 931{ 932 struct mmci_host *host = mmc_priv(mmc); 933 struct mmci_platform_data *plat = host->plat; 934 unsigned int status; 935 936 if (host->gpio_cd == -ENOSYS) { 937 if (!plat->status) 938 return 1; /* Assume always present */ 939 940 status = plat->status(mmc_dev(host->mmc)); 941 } else 942 status = !!gpio_get_value_cansleep(host->gpio_cd) 943 ^ plat->cd_invert; 944 945 /* 946 * Use positive logic throughout - status is zero for no card, 947 * non-zero for card inserted. 
948 */ 949 return status; 950} 951 952static irqreturn_t mmci_cd_irq(int irq, void *dev_id) 953{ 954 struct mmci_host *host = dev_id; 955 956 mmc_detect_change(host->mmc, msecs_to_jiffies(500)); 957 958 return IRQ_HANDLED; 959} 960 961static const struct mmc_host_ops mmci_ops = { 962 .request = mmci_request, 963 .set_ios = mmci_set_ios, 964 .get_ro = mmci_get_ro, 965 .get_cd = mmci_get_cd, 966}; 967 968static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) 969{ 970 struct mmci_platform_data *plat = dev->dev.platform_data; 971 struct variant_data *variant = id->data; 972 struct mmci_host *host; 973 struct mmc_host *mmc; 974 int ret; 975 976 /* must have platform data */ 977 if (!plat) { 978 ret = -EINVAL; 979 goto out; 980 } 981 982 ret = amba_request_regions(dev, DRIVER_NAME); 983 if (ret) 984 goto out; 985 986 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); 987 if (!mmc) { 988 ret = -ENOMEM; 989 goto rel_regions; 990 } 991 992 host = mmc_priv(mmc); 993 host->mmc = mmc; 994 995 host->gpio_wp = -ENOSYS; 996 host->gpio_cd = -ENOSYS; 997 host->gpio_cd_irq = -1; 998 999 host->hw_designer = amba_manf(dev); 1000 host->hw_revision = amba_rev(dev); 1001 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); 1002 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); 1003 1004 host->clk = clk_get(&dev->dev, NULL); 1005 if (IS_ERR(host->clk)) { 1006 ret = PTR_ERR(host->clk); 1007 host->clk = NULL; 1008 goto host_free; 1009 } 1010 1011 ret = clk_enable(host->clk); 1012 if (ret) 1013 goto clk_free; 1014 1015 host->plat = plat; 1016 host->variant = variant; 1017 host->mclk = clk_get_rate(host->clk); 1018 /* 1019 * According to the spec, mclk is max 100 MHz, 1020 * so we try to adjust the clock down to this, 1021 * (if possible). 
1022 */ 1023 if (host->mclk > 100000000) { 1024 ret = clk_set_rate(host->clk, 100000000); 1025 if (ret < 0) 1026 goto clk_disable; 1027 host->mclk = clk_get_rate(host->clk); 1028 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", 1029 host->mclk); 1030 } 1031 host->phybase = dev->res.start; 1032 host->base = ioremap(dev->res.start, resource_size(&dev->res)); 1033 if (!host->base) { 1034 ret = -ENOMEM; 1035 goto clk_disable; 1036 } 1037 1038 mmc->ops = &mmci_ops; 1039 mmc->f_min = (host->mclk + 511) / 512; 1040 /* 1041 * If the platform data supplies a maximum operating 1042 * frequency, this takes precedence. Else, we fall back 1043 * to using the module parameter, which has a (low) 1044 * default value in case it is not specified. Either 1045 * value must not exceed the clock rate into the block, 1046 * of course. 1047 */ 1048 if (plat->f_max) 1049 mmc->f_max = min(host->mclk, plat->f_max); 1050 else 1051 mmc->f_max = min(host->mclk, fmax); 1052 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); 1053 1054#ifdef CONFIG_REGULATOR 1055 /* If we're using the regulator framework, try to fetch a regulator */ 1056 host->vcc = regulator_get(&dev->dev, "vmmc"); 1057 if (IS_ERR(host->vcc)) 1058 host->vcc = NULL; 1059 else { 1060 int mask = mmc_regulator_get_ocrmask(host->vcc); 1061 1062 if (mask < 0) 1063 dev_err(&dev->dev, "error getting OCR mask (%d)\n", 1064 mask); 1065 else { 1066 host->mmc->ocr_avail = (u32) mask; 1067 if (plat->ocr_mask) 1068 dev_warn(&dev->dev, 1069 "Provided ocr_mask/setpower will not be used " 1070 "(using regulator instead)\n"); 1071 } 1072 } 1073#endif 1074 /* Fall back to platform data if no regulator is found */ 1075 if (host->vcc == NULL) 1076 mmc->ocr_avail = plat->ocr_mask; 1077 mmc->caps = plat->capabilities; 1078 1079 /* 1080 * We can do SGIO 1081 */ 1082 mmc->max_segs = NR_SG; 1083 1084 /* 1085 * Since only a certain number of bits are valid in the data length 1086 * register, we must ensure that we don't exceed 2^num-1 
bytes in a 1087 * single request. 1088 */ 1089 mmc->max_req_size = (1 << variant->datalength_bits) - 1; 1090 1091 /* 1092 * Set the maximum segment size. Since we aren't doing DMA 1093 * (yet) we are only limited by the data length register. 1094 */ 1095 mmc->max_seg_size = mmc->max_req_size; 1096 1097 /* 1098 * Block size can be up to 2048 bytes, but must be a power of two. 1099 */ 1100 mmc->max_blk_size = 2048; 1101 1102 /* 1103 * No limit on the number of blocks transferred. 1104 */ 1105 mmc->max_blk_count = mmc->max_req_size; 1106 1107 spin_lock_init(&host->lock); 1108 1109 writel(0, host->base + MMCIMASK0); 1110 writel(0, host->base + MMCIMASK1); 1111 writel(0xfff, host->base + MMCICLEAR); 1112 1113 if (gpio_is_valid(plat->gpio_cd)) { 1114 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); 1115 if (ret == 0) 1116 ret = gpio_direction_input(plat->gpio_cd); 1117 if (ret == 0) 1118 host->gpio_cd = plat->gpio_cd; 1119 else if (ret != -ENOSYS) 1120 goto err_gpio_cd; 1121 1122 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), 1123 mmci_cd_irq, 0, 1124 DRIVER_NAME " (cd)", host); 1125 if (ret >= 0) 1126 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); 1127 } 1128 if (gpio_is_valid(plat->gpio_wp)) { 1129 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); 1130 if (ret == 0) 1131 ret = gpio_direction_input(plat->gpio_wp); 1132 if (ret == 0) 1133 host->gpio_wp = plat->gpio_wp; 1134 else if (ret != -ENOSYS) 1135 goto err_gpio_wp; 1136 } 1137 1138 if ((host->plat->status || host->gpio_cd != -ENOSYS) 1139 && host->gpio_cd_irq < 0) 1140 mmc->caps |= MMC_CAP_NEEDS_POLL; 1141 1142 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); 1143 if (ret) 1144 goto unmap; 1145 1146 if (dev->irq[1] == NO_IRQ) 1147 host->singleirq = true; 1148 else { 1149 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, 1150 DRIVER_NAME " (pio)", host); 1151 if (ret) 1152 goto irq0_free; 1153 } 1154 1155 writel(MCI_IRQENABLE, host->base + 
MMCIMASK0); 1156 1157 amba_set_drvdata(dev, mmc); 1158 1159 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", 1160 mmc_hostname(mmc), amba_part(dev), amba_manf(dev), 1161 amba_rev(dev), (unsigned long long)dev->res.start, 1162 dev->irq[0], dev->irq[1]); 1163 1164 mmci_dma_setup(host); 1165 1166 mmc_add_host(mmc); 1167 1168 return 0; 1169 1170 irq0_free: 1171 free_irq(dev->irq[0], host); 1172 unmap: 1173 if (host->gpio_wp != -ENOSYS) 1174 gpio_free(host->gpio_wp); 1175 err_gpio_wp: 1176 if (host->gpio_cd_irq >= 0) 1177 free_irq(host->gpio_cd_irq, host); 1178 if (host->gpio_cd != -ENOSYS) 1179 gpio_free(host->gpio_cd); 1180 err_gpio_cd: 1181 iounmap(host->base); 1182 clk_disable: 1183 clk_disable(host->clk); 1184 clk_free: 1185 clk_put(host->clk); 1186 host_free: 1187 mmc_free_host(mmc); 1188 rel_regions: 1189 amba_release_regions(dev); 1190 out: 1191 return ret; 1192} 1193 1194static int __devexit mmci_remove(struct amba_device *dev) 1195{ 1196 struct mmc_host *mmc = amba_get_drvdata(dev); 1197 1198 amba_set_drvdata(dev, NULL); 1199 1200 if (mmc) { 1201 struct mmci_host *host = mmc_priv(mmc); 1202 1203 mmc_remove_host(mmc); 1204 1205 writel(0, host->base + MMCIMASK0); 1206 writel(0, host->base + MMCIMASK1); 1207 1208 writel(0, host->base + MMCICOMMAND); 1209 writel(0, host->base + MMCIDATACTRL); 1210 1211 mmci_dma_release(host); 1212 free_irq(dev->irq[0], host); 1213 if (!host->singleirq) 1214 free_irq(dev->irq[1], host); 1215 1216 if (host->gpio_wp != -ENOSYS) 1217 gpio_free(host->gpio_wp); 1218 if (host->gpio_cd_irq >= 0) 1219 free_irq(host->gpio_cd_irq, host); 1220 if (host->gpio_cd != -ENOSYS) 1221 gpio_free(host->gpio_cd); 1222 1223 iounmap(host->base); 1224 clk_disable(host->clk); 1225 clk_put(host->clk); 1226 1227 if (host->vcc) 1228 mmc_regulator_set_ocr(mmc, host->vcc, 0); 1229 regulator_put(host->vcc); 1230 1231 mmc_free_host(mmc); 1232 1233 amba_release_regions(dev); 1234 } 1235 1236 return 0; 1237} 1238 1239#ifdef CONFIG_PM 
1240static int mmci_suspend(struct amba_device *dev, pm_message_t state) 1241{ 1242 struct mmc_host *mmc = amba_get_drvdata(dev); 1243 int ret = 0; 1244 1245 if (mmc) { 1246 struct mmci_host *host = mmc_priv(mmc); 1247 1248 ret = mmc_suspend_host(mmc); 1249 if (ret == 0) 1250 writel(0, host->base + MMCIMASK0); 1251 } 1252 1253 return ret; 1254} 1255 1256static int mmci_resume(struct amba_device *dev) 1257{ 1258 struct mmc_host *mmc = amba_get_drvdata(dev); 1259 int ret = 0; 1260 1261 if (mmc) { 1262 struct mmci_host *host = mmc_priv(mmc); 1263 1264 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1265 1266 ret = mmc_resume_host(mmc); 1267 } 1268 1269 return ret; 1270} 1271#else 1272#define mmci_suspend NULL 1273#define mmci_resume NULL 1274#endif 1275 1276static struct amba_id mmci_ids[] = { 1277 { 1278 .id = 0x00041180, 1279 .mask = 0x000fffff, 1280 .data = &variant_arm, 1281 }, 1282 { 1283 .id = 0x00041181, 1284 .mask = 0x000fffff, 1285 .data = &variant_arm, 1286 }, 1287 /* ST Micro variants */ 1288 { 1289 .id = 0x00180180, 1290 .mask = 0x00ffffff, 1291 .data = &variant_u300, 1292 }, 1293 { 1294 .id = 0x00280180, 1295 .mask = 0x00ffffff, 1296 .data = &variant_u300, 1297 }, 1298 { 1299 .id = 0x00480180, 1300 .mask = 0x00ffffff, 1301 .data = &variant_ux500, 1302 }, 1303 { 0, 0 }, 1304}; 1305 1306static struct amba_driver mmci_driver = { 1307 .drv = { 1308 .name = DRIVER_NAME, 1309 }, 1310 .probe = mmci_probe, 1311 .remove = __devexit_p(mmci_remove), 1312 .suspend = mmci_suspend, 1313 .resume = mmci_resume, 1314 .id_table = mmci_ids, 1315}; 1316 1317static int __init mmci_init(void) 1318{ 1319 return amba_driver_register(&mmci_driver); 1320} 1321 1322static void __exit mmci_exit(void) 1323{ 1324 amba_driver_unregister(&mmci_driver); 1325} 1326 1327module_init(mmci_init); 1328module_exit(mmci_exit); 1329module_param(fmax, uint, 0444); 1330 1331MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); 1332MODULE_LICENSE("GPL"); 1333