mmci.c revision 599c1d5c750ddf528c7c6d3cdc466708f0502e66
/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		clk |= MCI_ST_UX500_NEG_EDGE;

	mmci_write_clkreg(host, clk);
}
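/*
 * Editor's note (illustrative example, not from the original source): with
 * mclk = 100 MHz and a requested card clock of 400 kHz, the ST formula above
 * gives clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248 and
 * cclk = 100 MHz / (248 + 2) = 400 kHz, while the original ARM formula gives
 * clkdiv = 100000000 / (2 * 400000) - 1 = 124 and
 * cclk = 100 MHz / (2 * (124 + 1)) = 400 kHz.  In both cases the divider is
 * clamped to 255, which is what bounds mmc->f_min in mmci_probe().
 */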
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used only in the probe and remove paths, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}
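/*
 * Editor's note (not from the original source): when the platform only
 * supplies dma_rx_param, the TX path above simply reuses the RX channel.
 * That is why mmci_dma_release() only releases dma_tx_channel when a
 * separate plat->dma_tx_param was provided - otherwise the same channel
 * would be released twice.
 */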
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE) {
		dir = DMA_TO_DEVICE;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

 unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}
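/*
 * Editor's note (illustrative, not from the original source): the maxburst
 * values above are in words.  On the ux500 variants fifohalfsize is
 * 8 * 4 = 32 bytes, so fifohalfsize >> 2 programs the DMA engine for
 * 8-word bursts, matching the FIFO half-full/half-empty request threshold
 * of the peripheral.
 */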
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
			   " host->next_data.cookie %d\n",
			   __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if config for dma */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}
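/*
 * Editor's note (not from the original source): the host_cookie set in
 * mmci_pre_request() is how a descriptor prepared ahead of time is matched
 * up again later.  mmci_get_next_data() adopts the pre-built descriptor
 * while the cookie is still valid, and mmci_post_request() unmaps the
 * scatterlist and clears the cookie once the request has completed.  The
 * "< 0 ? 1 : ..." test simply keeps the cookie positive if the counter
 * ever wraps around.
 */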
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if config for dma */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			/*
			 * The ST Micro variants have a special bit
			 * to enable SDIO.
			 */
			u32 clk;

			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		datactrl |= MCI_ST_DPSM_DDRMODE;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
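/*
 * Editor's note (illustrative, not from the original source): the data
 * timeout programmed above is expressed in card clock cycles.  With
 * data->timeout_ns = 100 ms and cclk = 26 MHz the conversion gives
 * (100,000,000 ns * 26,000,000 Hz) / 1,000,000,000 = 2,600,000 cycles, to
 * which any card-specified timeout_clks are added before writing
 * MMCIDATATIMER.
 */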
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
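/*
 * Editor's note (illustrative, not from the original source): as an example
 * of the error accounting above, consider an 8 x 512-byte read that fails
 * with MCI_DATACRCFAIL while MMCIDATACNT still reads 2048.  Then
 * success = 4096 - 2048 = 2048, the failed block is discounted to 2047, and
 * round_down(2047, 512) reports 1536 bytes (three complete blocks) in
 * data->bytes_xfered.
 */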
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host))
				mmci_dma_data_error(host);
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
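/*
 * Editor's note (illustrative, not from the original source): MMCIFIFOCNT
 * counts the words the data path still expects to transfer, so the
 * difference computed above is the number of bytes already sitting in the
 * FIFO.  For example, with host_remain = 512 and MMCIFIFOCNT = 120
 * (480 bytes still outstanding), count = 512 - 480 = 32 bytes are ready to
 * be drained in this pass.
 */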
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32bit write, 7 bytes will be two
		 * 32bit writes etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
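/*
 * Editor's note (illustrative, not from the original source): the
 * (count + 3) >> 2 rounding above means a 7-byte SDIO write is pushed as
 * two 32-bit FIFO words, with the final word carrying one byte of
 * don't-care data.  mmci_pio_read() handles the mirror-image case by
 * bouncing a short tail through a 4-byte buffer so it never writes past
 * the end of the caller's buffer.
 */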
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
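/*
 * Editor's note (not from the original source): note the asymmetry above -
 * for reads the data path is armed before the command is sent, so the DPSM
 * is ready as soon as the card starts returning data, whereas for writes
 * mmci_cmd_irq() only calls mmci_start_data() once the command has
 * completed successfully.
 */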
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

#ifdef CONFIG_OF
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	int bus_width = 0;

	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

	if (of_get_property(np, "cd-inverted", NULL))
		pdata->cd_invert = true;
	else
		pdata->cd_invert = false;

	of_property_read_u32(np, "max-frequency", &pdata->f_max);
	if (!pdata->f_max)
		pr_warn("%s has no 'max-frequency' property\n", np->full_name);

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

	of_property_read_u32(np, "bus-width", &bus_width);
	switch (bus_width) {
	case 0 :
		/* No bus-width supplied. */
		break;
	case 4 :
		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
		break;
	case 8 :
		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
		break;
	default :
		pr_warn("%s: Unsupported bus width\n", np->full_name);
	}
}
#else
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	return;
}
#endif
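/*
 * Editor's note (illustrative, not from the original source): a device tree
 * node handled by the parser above could look roughly like the sketch
 * below; the node name and GPIO phandles are hypothetical, only the
 * properties actually read here are shown:
 *
 *	sdi0: sdi@80126000 {
 *		bus-width = <4>;
 *		max-frequency = <50000000>;
 *		cd-gpios = <&gpio6 26 0>;
 *		cd-inverted;
 *		mmc-cap-sd-highspeed;
 *	};
 */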
static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	if (np)
		mmci_dt_populate_generic_pdata(np, plat);

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
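	/*
	 * Editor's note (illustrative, not from the original source): with
	 * mclk limited to 100 MHz above, the ST divider (maximum 255, i.e.
	 * mclk / 257) gives f_min of roughly 389 kHz, while the ARM divider
	 * gives mclk / 512, roughly 195 kHz.  f_max defaults to the fmax
	 * module parameter (515633 Hz) unless platform data or the DT
	 * "max-frequency" property raises it.
	 */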
	host->pinctrl = devm_pinctrl_get(&dev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto clk_disable;
	}

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);

	/* enable pins to be muxed in and configured */
	if (!IS_ERR(host->pins_default)) {
		ret = pinctrl_select_state(host->pinctrl, host->pins_default);
		if (ret)
			dev_warn(&dev->dev, "could not set default pins\n");
	} else
		dev_warn(&dev->dev, "could not get default pinstate\n");

	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/* We support these PM capabilities. */
	mmc->pm_caps = MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;
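	/*
	 * Editor's note (illustrative, not from the original source): for
	 * the ST variants datalength_bits is 24, so max_req_size is
	 * (1 << 24) - 1 = 16 MiB - 1, max_blk_size is 2048 bytes, and
	 * max_blk_count works out to 16777215 >> 11 = 8191 blocks per
	 * request.
	 */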
	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (plat->gpio_cd == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (plat->gpio_wp == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_wp;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
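/*
 * Editor's note (not from the original source): runtime PM references are
 * balanced across the driver as follows - probe() drops the initial
 * reference with pm_runtime_put() once autosuspend is set up, each request
 * and set_ios call brackets hardware access with pm_runtime_get_sync() and
 * pm_runtime_put_autosuspend(), and remove() below takes a final
 * pm_runtime_get_sync() so the PrimeCell stays powered while it is torn
 * down.
 */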
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable_unprepare(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x10180180,
		.mask   = 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id     = 0x10480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
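/*
 * Editor's note (not from the original source): the fmax cap can be
 * overridden at load time, e.g. "modprobe mmci fmax=26000000" when built
 * as a module, or mmci.fmax=26000000 on the kernel command line when
 * built in (assuming the usual mmci.ko module name for this file).
 */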