mmci.c revision bc521818e28042bb6018d91c353d24fb01ccb162
/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

/*
 * Default maximum operating frequency in Hz; platform data (plat->f_max)
 * takes precedence over this module parameter default.
 */
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl
 *		      register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
};

/* Original ARM variant: 16-word FIFO */
static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

/* ARM variant with a deeper (128-word) FIFO */
static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

/* ST-Ericsson U300: hardware flow control, SDIO, direct MCI_PWR_ON */
static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

/* ST-Ericsson Ux500: ST clock divider, 24-bit data length */
static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

/* Ux500v2: as Ux500 but block size lives at b16..b30 of datactrl */
static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

/*
 * Program the MMCICLOCK register for the @desired card clock frequency,
 * picking the variant-appropriate divider equation, and record the
 * resulting effective clock in host->cclk.
 *
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			/* Run at mclk directly, no divider */
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;	/* divider field is 8 bits */
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;	/* divider field is 8 bits */
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Bus width is encoded in the clock register on these blocks */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}

/*
 * Finish a request: disable the command state machine, drop the runtime
 * PM reference taken in mmci_request() and hand the request back to the
 * MMC core.  host->data must already have been torn down.
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	pm_runtime_put(mmc_dev(host->mmc));
	mmc_request_done(host->mmc, mrq);
}

/*
 * Set the FIFO interrupt mask.  On single-IRQ hardware the MASK1
 * interrupts are routed through IRQ0, so mirror the mask bits into
 * MMCIMASK0 as well.
 */
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

/* Disable the data path and FIFO interrupts and forget the current data. */
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

/*
 * Start an sg_miter iteration over the request's scatterlist for PIO.
 * ATOMIC because the FIFO is drained/filled from IRQ context.
 */
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
/*
 * Acquire DMA slave channels from the platform data filter, propagate
 * the engine's max segment size restriction to the MMC core, and seed
 * the pre-request cookie.  Missing channels degrade gracefully to PIO.
 */
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		/* Reuse the RX channel bidirectionally */
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 *
 * Note: the TX channel is only released separately when it was
 * requested separately (plat->dma_tx_param set); otherwise it aliases
 * the RX channel and must not be double-released.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

/*
 * Tear down the DMA mapping after a transfer.  Waits briefly for the
 * engine to drain the FIFO; leftover RX data means the controller
 * cannot do scatter-gather DMA reliably, so DMA is disabled entirely.
 */
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE) {
		dir = DMA_TO_DEVICE;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* Pre-mapped (host_cookie) buffers are unmapped in post_req */
	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

/* Abort the in-flight DMA job after a data error interrupt. */
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

/*
 * Map the scatterlist and prepare a slave DMA descriptor for @data.
 *
 * When @next is non-NULL this is a pre-request: the channel/descriptor
 * are parked in @next for mmci_get_next_data() to pick up later.
 * Otherwise they are installed as the current job.  Returns 0 on
 * success, -EINVAL to fall back to PIO, -ENOMEM on descriptor failure.
 */
static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

 unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

/*
 * Kick off the (already prepared) DMA job for host->data, enable the
 * DMA bit in @datactrl and unmask the data-end interrupt.  Non-zero
 * return means the caller should fall back to PIO.
 */
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

/*
 * Promote a descriptor prepared by mmci_pre_request() to be the
 * current DMA job, validating the pre-request cookie first.
 */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

/*
 * .pre_req hook: map and prepare the next request's DMA descriptor
 * ahead of time, stamping data->host_cookie on success (kept strictly
 * positive so 0 always means "not prepared").
 */
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if config for dma */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}
1 : nd->cookie; 516 } 517} 518 519static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, 520 int err) 521{ 522 struct mmci_host *host = mmc_priv(mmc); 523 struct mmc_data *data = mrq->data; 524 struct dma_chan *chan; 525 enum dma_data_direction dir; 526 527 if (!data) 528 return; 529 530 if (data->flags & MMC_DATA_READ) { 531 dir = DMA_FROM_DEVICE; 532 chan = host->dma_rx_channel; 533 } else { 534 dir = DMA_TO_DEVICE; 535 chan = host->dma_tx_channel; 536 } 537 538 539 /* if config for dma */ 540 if (chan) { 541 if (err) 542 dmaengine_terminate_all(chan); 543 if (data->host_cookie) 544 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 545 data->sg_len, dir); 546 mrq->data->host_cookie = 0; 547 } 548} 549 550#else 551/* Blank functions if the DMA engine is not available */ 552static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 553{ 554} 555static inline void mmci_dma_setup(struct mmci_host *host) 556{ 557} 558 559static inline void mmci_dma_release(struct mmci_host *host) 560{ 561} 562 563static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 564{ 565} 566 567static inline void mmci_dma_data_error(struct mmci_host *host) 568{ 569} 570 571static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 572{ 573 return -ENOSYS; 574} 575 576#define mmci_pre_request NULL 577#define mmci_post_request NULL 578 579#endif 580 581static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 582{ 583 struct variant_data *variant = host->variant; 584 unsigned int datactrl, timeout, irqmask; 585 unsigned long long clks; 586 void __iomem *base; 587 int blksz_bits; 588 589 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", 590 data->blksz, data->blocks, data->flags); 591 592 host->data = data; 593 host->size = data->blksz * data->blocks; 594 data->bytes_xfered = 0; 595 596 clks = (unsigned long long)data->timeout_ns * host->cclk; 597 do_div(clks, 1000000000UL); 598 599 
timeout = data->timeout_clks + (unsigned int)clks; 600 601 base = host->base; 602 writel(timeout, base + MMCIDATATIMER); 603 writel(host->size, base + MMCIDATALENGTH); 604 605 blksz_bits = ffs(data->blksz) - 1; 606 BUG_ON(1 << blksz_bits != data->blksz); 607 608 if (variant->blksz_datactrl16) 609 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); 610 else 611 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 612 613 if (data->flags & MMC_DATA_READ) 614 datactrl |= MCI_DPSM_DIRECTION; 615 616 /* 617 * Attempt to use DMA operation mode, if this 618 * should fail, fall back to PIO mode 619 */ 620 if (!mmci_dma_start_data(host, datactrl)) 621 return; 622 623 /* IRQ mode, map the SG list for CPU reading/writing */ 624 mmci_init_sg(host, data); 625 626 if (data->flags & MMC_DATA_READ) { 627 irqmask = MCI_RXFIFOHALFFULLMASK; 628 629 /* 630 * If we have less than the fifo 'half-full' threshold to 631 * transfer, trigger a PIO interrupt as soon as any data 632 * is available. 633 */ 634 if (host->size < variant->fifohalfsize) 635 irqmask |= MCI_RXDATAAVLBLMASK; 636 } else { 637 /* 638 * We don't actually need to include "FIFO empty" here 639 * since its implicit in "FIFO half empty". 
640 */ 641 irqmask = MCI_TXFIFOHALFEMPTYMASK; 642 } 643 644 /* The ST Micro variants has a special bit to enable SDIO */ 645 if (variant->sdio && host->mmc->card) 646 if (mmc_card_sdio(host->mmc->card)) 647 datactrl |= MCI_ST_DPSM_SDIOEN; 648 649 writel(datactrl, base + MMCIDATACTRL); 650 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 651 mmci_set_mask1(host, irqmask); 652} 653 654static void 655mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 656{ 657 void __iomem *base = host->base; 658 659 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 660 cmd->opcode, cmd->arg, cmd->flags); 661 662 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 663 writel(0, base + MMCICOMMAND); 664 udelay(1); 665 } 666 667 c |= cmd->opcode | MCI_CPSM_ENABLE; 668 if (cmd->flags & MMC_RSP_PRESENT) { 669 if (cmd->flags & MMC_RSP_136) 670 c |= MCI_CPSM_LONGRSP; 671 c |= MCI_CPSM_RESPONSE; 672 } 673 if (/*interrupt*/0) 674 c |= MCI_CPSM_INTERRUPT; 675 676 host->cmd = cmd; 677 678 writel(cmd->arg, base + MMCIARGUMENT); 679 writel(c, base + MMCICOMMAND); 680} 681 682static void 683mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 684 unsigned int status) 685{ 686 /* First check for errors */ 687 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 688 MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 689 u32 remain, success; 690 691 /* Terminate the DMA transfer */ 692 if (dma_inprogress(host)) 693 mmci_dma_data_error(host); 694 695 /* 696 * Calculate how far we are into the transfer. Note that 697 * the data counter gives the number of bytes transferred 698 * on the MMC bus, not on the host side. On reads, this 699 * can be as much as a FIFO-worth of data ahead. This 700 * matters for FIFO overruns only. 
701 */ 702 remain = readl(host->base + MMCIDATACNT); 703 success = data->blksz * data->blocks - remain; 704 705 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 706 status, success); 707 if (status & MCI_DATACRCFAIL) { 708 /* Last block was not successful */ 709 success -= 1; 710 data->error = -EILSEQ; 711 } else if (status & MCI_DATATIMEOUT) { 712 data->error = -ETIMEDOUT; 713 } else if (status & MCI_STARTBITERR) { 714 data->error = -ECOMM; 715 } else if (status & MCI_TXUNDERRUN) { 716 data->error = -EIO; 717 } else if (status & MCI_RXOVERRUN) { 718 if (success > host->variant->fifosize) 719 success -= host->variant->fifosize; 720 else 721 success = 0; 722 data->error = -EIO; 723 } 724 data->bytes_xfered = round_down(success, data->blksz); 725 } 726 727 if (status & MCI_DATABLOCKEND) 728 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 729 730 if (status & MCI_DATAEND || data->error) { 731 if (dma_inprogress(host)) 732 mmci_dma_unmap(host, data); 733 mmci_stop_data(host); 734 735 if (!data->error) 736 /* The error clause is handled above, success! 
*/ 737 data->bytes_xfered = data->blksz * data->blocks; 738 739 if (!data->stop) { 740 mmci_request_end(host, data->mrq); 741 } else { 742 mmci_start_command(host, data->stop, 0); 743 } 744 } 745} 746 747static void 748mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 749 unsigned int status) 750{ 751 void __iomem *base = host->base; 752 753 host->cmd = NULL; 754 755 if (status & MCI_CMDTIMEOUT) { 756 cmd->error = -ETIMEDOUT; 757 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 758 cmd->error = -EILSEQ; 759 } else { 760 cmd->resp[0] = readl(base + MMCIRESPONSE0); 761 cmd->resp[1] = readl(base + MMCIRESPONSE1); 762 cmd->resp[2] = readl(base + MMCIRESPONSE2); 763 cmd->resp[3] = readl(base + MMCIRESPONSE3); 764 } 765 766 if (!cmd->data || cmd->error) { 767 if (host->data) { 768 /* Terminate the DMA transfer */ 769 if (dma_inprogress(host)) 770 mmci_dma_data_error(host); 771 mmci_stop_data(host); 772 } 773 mmci_request_end(host, cmd->mrq); 774 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 775 mmci_start_data(host, cmd->data); 776 } 777} 778 779static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 780{ 781 void __iomem *base = host->base; 782 char *ptr = buffer; 783 u32 status; 784 int host_remain = host->size; 785 786 do { 787 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); 788 789 if (count > remain) 790 count = remain; 791 792 if (count <= 0) 793 break; 794 795 readsl(base + MMCIFIFO, ptr, count >> 2); 796 797 ptr += count; 798 remain -= count; 799 host_remain -= count; 800 801 if (remain == 0) 802 break; 803 804 status = readl(base + MMCISTATUS); 805 } while (status & MCI_RXDATAAVLBL); 806 807 return ptr - buffer; 808} 809 810static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 811{ 812 struct variant_data *variant = host->variant; 813 void __iomem *base = host->base; 814 char *ptr = buffer; 815 816 do { 817 unsigned int count, maxcnt; 818 819 
maxcnt = status & MCI_TXFIFOEMPTY ? 820 variant->fifosize : variant->fifohalfsize; 821 count = min(remain, maxcnt); 822 823 /* 824 * The ST Micro variant for SDIO transfer sizes 825 * less then 8 bytes should have clock H/W flow 826 * control disabled. 827 */ 828 if (variant->sdio && 829 mmc_card_sdio(host->mmc->card)) { 830 if (count < 8) 831 writel(readl(host->base + MMCICLOCK) & 832 ~variant->clkreg_enable, 833 host->base + MMCICLOCK); 834 else 835 writel(readl(host->base + MMCICLOCK) | 836 variant->clkreg_enable, 837 host->base + MMCICLOCK); 838 } 839 840 /* 841 * SDIO especially may want to send something that is 842 * not divisible by 4 (as opposed to card sectors 843 * etc), and the FIFO only accept full 32-bit writes. 844 * So compensate by adding +3 on the count, a single 845 * byte become a 32bit write, 7 bytes will be two 846 * 32bit writes etc. 847 */ 848 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); 849 850 ptr += count; 851 remain -= count; 852 853 if (remain == 0) 854 break; 855 856 status = readl(base + MMCISTATUS); 857 } while (status & MCI_TXFIFOHALFEMPTY); 858 859 return ptr - buffer; 860} 861 862/* 863 * PIO data transfer IRQ handler. 864 */ 865static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 866{ 867 struct mmci_host *host = dev_id; 868 struct sg_mapping_iter *sg_miter = &host->sg_miter; 869 struct variant_data *variant = host->variant; 870 void __iomem *base = host->base; 871 unsigned long flags; 872 u32 status; 873 874 status = readl(base + MMCISTATUS); 875 876 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 877 878 local_irq_save(flags); 879 880 do { 881 unsigned int remain, len; 882 char *buffer; 883 884 /* 885 * For write, we only need to test the half-empty flag 886 * here - if the FIFO is completely empty, then by 887 * definition it is more than half empty. 888 * 889 * For read, check for data available. 
890 */ 891 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 892 break; 893 894 if (!sg_miter_next(sg_miter)) 895 break; 896 897 buffer = sg_miter->addr; 898 remain = sg_miter->length; 899 900 len = 0; 901 if (status & MCI_RXACTIVE) 902 len = mmci_pio_read(host, buffer, remain); 903 if (status & MCI_TXACTIVE) 904 len = mmci_pio_write(host, buffer, remain, status); 905 906 sg_miter->consumed = len; 907 908 host->size -= len; 909 remain -= len; 910 911 if (remain) 912 break; 913 914 status = readl(base + MMCISTATUS); 915 } while (1); 916 917 sg_miter_stop(sg_miter); 918 919 local_irq_restore(flags); 920 921 /* 922 * If we have less than the fifo 'half-full' threshold to transfer, 923 * trigger a PIO interrupt as soon as any data is available. 924 */ 925 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 926 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 927 928 /* 929 * If we run out of data, disable the data IRQs; this 930 * prevents a race where the FIFO becomes empty before 931 * the chip itself has disabled the data path, and 932 * stops us racing with our data end IRQ. 933 */ 934 if (host->size == 0) { 935 mmci_set_mask1(host, 0); 936 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 937 } 938 939 return IRQ_HANDLED; 940} 941 942/* 943 * Handle completion of command and data transfers. 
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		/*
		 * On single-IRQ hardware the PIO (MASK1) interrupts arrive
		 * here too; dispatch them and strip their bits from status.
		 */
		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

/*
 * .request hook: validate the block size, take a runtime PM reference
 * (released in mmci_request_end()), and start the command — and, for
 * reads, the data path as well so the FIFO is armed before the card
 * starts sending.
 */
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	/* Hardware only supports power-of-two block sizes */
	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * .set_ios hook: apply power mode (via regulator or MMCIPOWER),
 * signal-direction bits, open-drain mode and the requested clock.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
		host->plat->ios_handler(mmc_dev(mmc), ios))
			dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct return void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		/* Mask off direction bits for data lines not in use */
		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	/* Only touch MMCIPOWER when the value actually changes */
	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * .get_ro hook: read the write-protect GPIO, or -ENOSYS when no such
 * GPIO was configured.
 */
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

/*
 * .get_cd hook: card-detect via GPIO (with optional inversion) or a
 * platform callback; without either, assume a card is always present.
 */
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}
 */
	return status;
}

/*
 * Card-detect GPIO interrupt handler.  Kick the MMC core to rescan the
 * slot; the 500 ms delay debounces mechanical insertion/removal bounce.
 */
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

/* Host operations handed to the MMC core via mmc->ops in mmci_probe(). */
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

/*
 * mmci_probe() - bind an MMCI primecell to this driver.
 * @dev: the AMBA device being probed
 * @id: matched entry from mmci_ids; id->data is the variant_data quirks
 *
 * Allocates the mmc_host, claims clock/MMIO/GPIO/IRQ resources, programs
 * the host capabilities from platform data and the variant, and registers
 * with the MMC core.  On any failure the goto ladder at the bottom unwinds
 * exactly the resources acquired so far, in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	/* mmc_alloc_host() zeroes the private area, so host->vcc etc. start NULL */
	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* -ENOSYS marks "no GPIO configured"; tested on every cleanup path */
	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		/* NULL so a later clk_put() on an error path is safe */
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare(host->clk);
	if (ret)
		goto clk_free;

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_unprep;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		/* Re-read: the clock framework may round to a nearby rate */
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;	/* no regulator: fall back to platform data below */
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	/* Mask off and clear all interrupts before we request any IRQs */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		/*
		 * Failure here is deliberately non-fatal: gpio_cd_irq stays
		 * -1 and the NEEDS_POLL fallback below takes over.
		 */
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	/* Card detection exists (callback or GPIO) but no CD IRQ: poll instead */
	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	/* Some cells mux command and PIO interrupts onto a single line */
	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	/* Dropped here; mmci_remove() re-takes it with pm_runtime_get_sync() */
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

	/* Error unwind: each label releases what was acquired just before it */
 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_unprep:
	clk_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

/*
 * mmci_remove() - unbind the driver, releasing everything mmci_probe()
 * acquired, in reverse order.  Interrupts are masked and any in-flight
 * command/data transfer is cancelled at the registers before the IRQs
 * and MMIO mapping are torn down.
 */
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		/* Mask all interrupts */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		/* Abort any command/data activity in the cell */
		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_unprepare(host->clk);
		clk_put(host->clk);

		/* Power down the card before releasing the regulator */
		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		/* NOTE(review): vcc may be NULL here; assumes regulator_put(NULL) is a no-op */
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
/*
 * Legacy (AMBA bus) suspend hook: let the MMC core quiesce the card,
 * then mask the controller's interrupts for the duration of suspend.
 */
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

/*
 * Legacy (AMBA bus) resume hook: restore the interrupt mask written at
 * the end of probe, then let the MMC core re-initialize the card.
 */
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

/*
 * Peripheral ID match table.  Each entry selects the variant_data with
 * the quirks for that ARM or ST Micro implementation of the cell; the
 * masks ignore the ID fields that vary between otherwise-identical cuts.
 */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id     = 0x10480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },	/* sentinel */
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
/* Read-only module parameter: default f_max when platform data gives none */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");