mmci.c revision 7437cfa532842ce75189826742bddf1ba137f58e
/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if block size is at b16..b30 position in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};
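
/*
 * The variant_data in use is selected by AMBA peripheral ID matching
 * against mmci_ids[] at the bottom of this file: for example, periphid
 * 0x00480180 maps to variant_ux500 and 0x10480180 to variant_ux500v2,
 * while the plain ARM PL180/PL181 IDs map to variant_arm or
 * variant_arm_extended_fifo.
 */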

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	mmci_write_clkreg(host, clk);
}
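
/*
 * Worked example for the two divider equations above (illustrative,
 * derived from mmci_set_clkreg()): with mclk = 100 MHz and a desired
 * rate of 400 kHz, the ST divider gives
 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248, so
 * cclk = 100000000 / (248 + 2) = 400 kHz, while the PL180 divider gives
 * clkdiv = 100000000 / (2 * 400000) - 1 = 124, so
 * cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
 */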

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE) {
		dir = DMA_TO_DEVICE;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

 unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
			   " host->next_data.cookie %d\n",
			   __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}
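
/*
 * Call flow for the asynchronous (pre_req/post_req) path, as implied by
 * the cookie handling in these helpers: the MMC core may call
 * mmci_pre_request() for request N+1 while request N is still in
 * flight. mmci_dma_prep_data() then maps the sglist and parks the
 * descriptor in host->next_data together with a cookie; when the
 * request is actually issued, mmci_get_next_data() promotes that
 * descriptor to host->dma_current/host->dma_desc_current, and
 * mmci_post_request() finally unmaps the buffers and clears the cookie.
 */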

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if config for dma */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if config for dma */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
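
/*
 * Example of the command register encoding above (illustrative): a
 * READ_SINGLE_BLOCK (CMD17), which expects a short CRC-protected
 * response, ends up as c = 17 | MCI_CPSM_ENABLE | MCI_CPSM_RESPONSE,
 * while a SEND_CSD (CMD9) with its 136-bit response additionally sets
 * MCI_CPSM_LONGRSP.
 */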

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host))
				mmci_dma_data_error(host);
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				readsl(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				readsl(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			readsl(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
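
/*
 * Illustrative numbers for the count calculation in mmci_pio_read()
 * above: assuming host->size is 512 bytes and MMCIFIFOCNT reads back
 * 120 (words), count = 512 - (120 << 2) = 32, so 8 words are drained
 * from the FIFO on that pass.
 */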

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * The ST Micro variant for SDIO transfer sizes
		 * less than 8 bytes should have clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio &&
		    mmc_card_sdio(host->mmc->card)) {
			u32 clk;
			if (count < 8)
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
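
/*
 * Note on the ordering in mmci_request() above: for reads the data path
 * is programmed before the command is sent, so the hardware can start
 * receiving as soon as the card responds; for writes, mmci_start_data()
 * is instead deferred to mmci_cmd_irq() once the command has completed
 * successfully.
 */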

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				goto out;
			}
		}
		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

 out:
	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare(host->clk);
	if (ret)
		goto clk_free;

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_unprep;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
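
	/*
	 * For example, with mclk capped at 100 MHz this yields an f_min of
	 * DIV_ROUND_UP(100000000, 257) = 389106 Hz for the ST variants
	 * (maximum divider 255, f = mclk / (clkdiv + 2)) and
	 * DIV_ROUND_UP(100000000, 512) = 195313 Hz for the ARM variants
	 * (f = mclk / (2 * (clkdiv + 1))).
	 */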

	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
				mmci_cd_irq,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_unprep:
	clk_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_unprepare(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");