mmci.c revision f829c04204de83aa0d13307d2a2dc07c0d9a94e3
/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

/* Maximum supported bus clock frequency (Hz); see the PL180 variants. */
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl
 *		      register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on dat0 is supported
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
	bool			pwrreg_clkgate;
	bool			busy_detect;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
};

/*
 * Report whether the card signals busy on dat0 (ST Micro variants only;
 * MCI_ST_CARDBUSY is sampled from MMCISTATUS under the host lock).
 */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return busy;
}

/*
 * Validate mmc prerequisites
 *
 * Returns 0 when @data is NULL or its block size is a power of two;
 * -EINVAL otherwise (the DPSM block-size field encodes log2(blksz)).
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	/* Skip the MMIO write when the cached value is already current. */
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	/* Skip the MMIO write when the cached value is already current. */
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep ST Micro busy mode if enabled */
	datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 *
 * Program MMCICLOCK for the @desired bus frequency (Hz) and update
 * host->cclk with the frequency actually achieved.  @desired == 0
 * leaves the clock disabled (MCI_CLK_ENABLE not set).
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		clk |= MCI_ST_UX500_NEG_EDGE;

	mmci_write_clkreg(host, clk);
}

/*
 * Complete @mrq back to the MMC core and drop the runtime-PM reference
 * taken when the request was started.  Called with host->lock held.
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

/*
 * Program the PIO interrupt mask.  On single-IRQ hosts the MASK1 bits
 * are mirrored into MASK0 so both interrupt sources fire IRQ0.
 */
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

/* Disable the data path and forget the current data transfer. */
static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

/* Start an atomic scatter-gather iterator over @data for PIO transfers. */
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
/*
 * Acquire the RX/TX DMA channels, preferring DT-described slave channels
 * and falling back to the board's platform-data filter function.  If only
 * an RX channel exists it is reused bidirectionally for TX.
 */
static void mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Fall back to platform data when DT did not provide channels. */
	if (plat && plat->dma_filter) {
		if (!host->dma_rx_channel && plat->dma_rx_param) {
			host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
			/* E.g if no DMA hardware is present */
			if (!host->dma_rx_channel)
				dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
		}

		if (!host->dma_tx_channel && plat->dma_tx_param) {
			host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
			if (!host->dma_tx_channel)
				dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
		}
	}

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
418 */ 419 if (host->dma_tx_channel) { 420 struct device *dev = host->dma_tx_channel->device->dev; 421 unsigned int max_seg_size = dma_get_max_seg_size(dev); 422 423 if (max_seg_size < host->mmc->max_seg_size) 424 host->mmc->max_seg_size = max_seg_size; 425 } 426 if (host->dma_rx_channel) { 427 struct device *dev = host->dma_rx_channel->device->dev; 428 unsigned int max_seg_size = dma_get_max_seg_size(dev); 429 430 if (max_seg_size < host->mmc->max_seg_size) 431 host->mmc->max_seg_size = max_seg_size; 432 } 433} 434 435/* 436 * This is used in or so inline it 437 * so it can be discarded. 438 */ 439static inline void mmci_dma_release(struct mmci_host *host) 440{ 441 struct mmci_platform_data *plat = host->plat; 442 443 if (host->dma_rx_channel) 444 dma_release_channel(host->dma_rx_channel); 445 if (host->dma_tx_channel && plat->dma_tx_param) 446 dma_release_channel(host->dma_tx_channel); 447 host->dma_rx_channel = host->dma_tx_channel = NULL; 448} 449 450static void mmci_dma_data_error(struct mmci_host *host) 451{ 452 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); 453 dmaengine_terminate_all(host->dma_current); 454 host->dma_current = NULL; 455 host->dma_desc_current = NULL; 456 host->data->host_cookie = 0; 457} 458 459static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 460{ 461 struct dma_chan *chan; 462 enum dma_data_direction dir; 463 464 if (data->flags & MMC_DATA_READ) { 465 dir = DMA_FROM_DEVICE; 466 chan = host->dma_rx_channel; 467 } else { 468 dir = DMA_TO_DEVICE; 469 chan = host->dma_tx_channel; 470 } 471 472 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 473} 474 475static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) 476{ 477 u32 status; 478 int i; 479 480 /* Wait up to 1ms for the DMA to complete */ 481 for (i = 0; ; i++) { 482 status = readl(host->base + MMCISTATUS); 483 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) 484 break; 485 udelay(10); 486 } 487 488 /* 489 * Check to 
	 * see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

/* Prepare the current job unless one was already prepared by pre_request. */
static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job were prepared thus do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

/* Prepare the next (pipelined) job into host->next_data. */
static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

/*
 * Submit the prepared descriptor and start the data path with DMA
 * enabled.  Returns non-zero when DMA cannot be used (caller falls
 * back to PIO).
 */
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

/* Move the pre-prepared descriptor/channel into the current slots. */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

/*
 * mmc_host_ops .pre_req hook: map and prepare the next request's data
 * while the current one is still in flight.  A non-zero host_cookie
 * marks the data as prepared (cookie wraps around, skipping 0).
 */
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ?
			1 : nd->cookie;
}

/*
 * mmc_host_ops .post_req hook: unmap the data buffers and, on error,
 * terminate the channel and drop any pre-prepared descriptor.
 */
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		next->dma_desc = NULL;
		next->dma_chan = NULL;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

/*
 * Program the data path (timeout, length, DPSM) for @data and start the
 * transfer, preferring DMA and falling back to interrupt-driven PIO.
 */
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	/* Convert the ns timeout into card clock cycles. */
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	/* blksz was validated to be a power of two in mmci_validate_data(). */
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants has a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			u32 clk;

			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		datactrl |= MCI_ST_DPSM_DDRMODE;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	/* DATAEND is only enabled once PIO has drained the FIFO, see mmci_pio_irq(). */
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

/*
 * Write @cmd into the CPSM; any previously enabled command is cleared
 * first (with a 1 us settle) before the new opcode/argument are set.
 */
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

/*
 * Data-path interrupt handler: classify errors, account transferred
 * bytes, and on DATAEND either complete the request or issue the stop
 * command.  Called from mmci_irq() with host->lock held.
 */
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		/* Only whole blocks count as transferred. */
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		/* With set_block_count (sbc) no explicit stop is sent. */
		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

/*
 * Command interrupt handler: record errors or latch the response
 * registers, then advance the request state machine (sbc -> cmd ->
 * data / done).  Called from mmci_irq() with host->lock held.
 */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc = (cmd == host->mrq->sbc);

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		/* Reads were started before the command; start writes now. */
		mmci_start_data(host, cmd->data);
	}
}

/*
 * Drain up to @remain bytes from the FIFO into @buffer (PIO read).
 * Returns the number of bytes actually copied.
 */
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		/* MMCIFIFOCNT counts words still expected, hence << 2. */
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				/* Bounce the final partial word through a stack buffer. */
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

/*
 * Fill the FIFO with up to @remain bytes from @buffer (PIO write).
 * Returns the number of bytes actually consumed from @buffer.
 */
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		/* Room available: whole FIFO if empty, otherwise half. */
		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accept full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte become a 32bit write, 7 bytes will be two
		 * 32bit writes etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	/* The sg_miter was started SG_MITER_ATOMIC; keep IRQs off while mapped. */
	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		/* On single-IRQ hosts, dispatch PIO work from here too. */
		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

/*
 * mmc_host_ops .request hook: validate the request, take a runtime-PM
 * reference (released in mmci_request_end()), and kick off the state
 * machine.  Reads start their data phase before the command is sent.
 */
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	/* Issue SBC (set block count, CMD23) first when present. */
	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * mmc_host_ops .set_ios callback: apply power mode, bus settings and
 * clock frequency.  Builds up the MMCIPOWER value in 'pwr' and writes
 * clock and power registers under the host lock at the end (continues
 * past this chunk).
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	/* Give the board code first shot at the ios change, if it wants one. */
	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		/* IO voltage rail is brought up only once per power cycle. */
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		/*
		 * Narrower bus widths must not drive the unused data
		 * direction bits, so clear them for 4-bit and 1-bit modes.
		 */
		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	/* Commit clock and power register values atomically w.r.t. the IRQ. */
	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

/*
 * mmc_host_ops .get_ro callback: read the write-protect GPIO, or report
 * -ENOSYS when no such GPIO was configured.
 */
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

/*
 * mmc_host_ops .get_cd callback: report card presence from the
 * card-detect GPIO, a platform status callback, or assume the card is
 * always present when neither exists.
 */
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		/* XOR with cd_invert handles active-low detect lines. */
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

/*
 * mmc_host_ops .start_signal_voltage_switch callback: retarget the
 * vqmmc (IO voltage) regulator for the requested signalling level.
 * A no-op (returning 0) when no vqmmc regulator is available.
 */
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		pm_runtime_get_sync(mmc_dev(mmc));

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}

	return ret;
}

/*
 * Card-detect GPIO interrupt: debounce by asking the core to rescan
 * the slot after 500 ms.
 */
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

/*
 * Host operations table.  Not const: probe() patches in .card_busy at
 * runtime for variants with busy detection.
 */
static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

#ifdef CONFIG_OF
/*
 * Fill a (possibly empty) platform data structure from generic device
 * tree properties: CD/WP gpios, cd polarity, max frequency, highspeed
 * capabilities and bus width (continues past this chunk).
 */
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					struct mmci_platform_data *pdata)
{
	int bus_width = 0;

	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

	if (of_get_property(np, "cd-inverted", NULL))
		pdata->cd_invert = true;
	else
		pdata->cd_invert = false;

	of_property_read_u32(np, "max-frequency", &pdata->f_max);
	if (!pdata->f_max)
		pr_warn("%s has no 'max-frequency' property\n", np->full_name);

	if (of_get_property(np, "mmc-cap-mmc-highspeed",
			    NULL))
		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

	of_property_read_u32(np, "bus-width", &bus_width);
	switch (bus_width) {
	case 0 :
		/* No bus-width supplied. */
		break;
	case 4 :
		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
		break;
	case 8 :
		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
		break;
	default :
		pr_warn("%s: Unsupported bus width\n", np->full_name);
	}
}
#else
/* Stub for !CONFIG_OF builds: DT properties cannot exist, do nothing. */
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					struct mmci_platform_data *pdata)
{
	return;
}
#endif

/*
 * AMBA bus probe: allocate and configure an mmc_host for one MMCI cell.
 * Configuration may come from platform data, device tree, or both; the
 * variant data attached to the AMBA id selects hardware-quirk handling
 * (function continues past this chunk).
 */
static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree.
	 */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	/* DT-only probe: synthesize an empty pdata for the DT parser to fill. */
	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	if (np)
		mmci_dt_populate_generic_pdata(np, plat);

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* -ENOSYS marks "no GPIO configured" throughout this driver. */
	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		/* Re-read: clk_set_rate may have rounded to a nearby rate. */
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	/* Variants with hardware busy detection get the .card_busy op. */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/* We support these PM capabilities. */
	mmc->pm_caps = MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	/* Quiesce the controller: mask and clear all interrupt sources. */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (plat->gpio_cd == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		/*
		 * Failure here is deliberately not fatal: with no CD irq we
		 * fall back to polling (MMC_CAP_NEEDS_POLL below).
		 */
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (plat->gpio_wp == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_wp;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	/* Card detection exists but without an interrupt: poll instead. */
	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	/* Second (PIO/FIFO) irq is optional; absent means single-irq mode. */
	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

	/* Error unwind: labels release resources in reverse acquisition order. */
 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

/*
 * AMBA bus remove: tear down everything probe() set up, in reverse
 * order (host unregister, controller quiesce, irqs, gpios, mapping,
 * clock, host structure, bus regions).
 */
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		/* Mask all interrupts and stop any in-flight command/data. */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable_unprepare(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
/*
 * System suspend: let the core suspend the card first, then hold a
 * pm_runtime reference (keeps the cell accessible) and mask interrupts.
 * The reference is dropped again in mmci_resume().
 */
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

/*
 * System resume: re-enable the interrupt mask, balance the runtime-PM
 * reference taken at suspend, and resume the card.
 */
static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE,
		       host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime suspend: park the pins in their sleep state and gate the
 * interface clock.  The register mapping stays live; callers must hold
 * a runtime-PM reference before touching the hardware.
 */
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

/*
 * Runtime resume: ungate the interface clock and restore the default
 * pin state (inverse order of mmci_runtime_suspend).
 */
static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

/*
 * AMBA peripheral-ID match table.  The id/mask pairs distinguish the
 * ARM PL180/PL181 flavours and the ST Micro derivatives; .data selects
 * the matching variant quirk structure.
 */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x10180180,
		.mask	= 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

/* Fallback maximum operating frequency, overridable at module load. */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");