Lines matching refs:host (cross-reference hits in linux/drivers/mmc/host/mmci.c, the ARM PrimeCell MMCI PL180/1 driver; each entry is prefixed by its line number in the file)

2  *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
25 #include <linux/mmc/host.h>
225 struct mmci_host *host = mmc_priv(mmc);
231 spin_lock_irqsave(&host->lock, flags);
232 if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
234 spin_unlock_irqrestore(&host->lock, flags);
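The fragments at 225-234 are the core of the ->card_busy() callback: under the host lock, DAT0 is sampled through the MCI_ST_CARDBUSY status bit. A minimal sketch of the surrounding function, reconstructed from these lines (the locals and return path are assumptions):

        static int mmci_card_busy(struct mmc_host *mmc)
        {
                struct mmci_host *host = mmc_priv(mmc);
                unsigned long flags;
                int busy = 0;

                spin_lock_irqsave(&host->lock, flags);
                /* ST variants report an ongoing busy period on DAT0 here */
                if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
                        busy = 1;
                spin_unlock_irqrestore(&host->lock, flags);

                return busy;
        }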
245 static int mmci_validate_data(struct mmci_host *host,
252 dev_err(mmc_dev(host->mmc),
260 static void mmci_reg_delay(struct mmci_host *host)
269 if (host->cclk < 25000000)
276 * This must be called with host->lock held
278 static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
280 if (host->clk_reg != clk) {
281 host->clk_reg = clk;
282 writel(clk, host->base + MMCICLOCK);
287 * This must be called with host->lock held
289 static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
291 if (host->pwr_reg != pwr) {
292 host->pwr_reg = pwr;
293 writel(pwr, host->base + MMCIPOWER);
298 * This must be called with host->lock held
300 static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
303 datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;
305 if (host->datactrl_reg != datactrl) {
306 host->datactrl_reg = datactrl;
307 writel(datactrl, host->base + MMCIDATACTRL);
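Lines 278-307 repeat one write-if-changed pattern for the clock, power, and data-control registers: cache the last value written and skip the MMIO write when nothing changed, which also gives mmci_restore() (1795) known values to replay. Note that the datactrl variant first folds the cached MCI_ST_DPSM_BUSYMODE bit back in (303) so busy detection survives unrelated updates. The pattern, assembled from the fragments:

        /* This must be called with host->lock held */
        static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
        {
                if (host->pwr_reg != pwr) {
                        host->pwr_reg = pwr; /* cache for change detection and restore */
                        writel(pwr, host->base + MMCIPOWER);
                }
        }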
312 * This must be called with host->lock held
314 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
316 struct variant_data *variant = host->variant;
320 host->cclk = 0;
324 host->cclk = host->mclk;
325 } else if (desired >= host->mclk) {
329 host->cclk = host->mclk;
337 clk = DIV_ROUND_UP(host->mclk, desired) - 2;
340 host->cclk = host->mclk / (clk + 2);
346 clk = host->mclk / (2 * desired) - 1;
349 host->cclk = host->mclk / (2 * (clk + 1));
359 host->mmc->actual_clock = host->cclk;
361 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
363 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
366 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
367 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
370 mmci_write_clkreg(host, clk);
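The two divider formulas at 337 and 346 reflect the two hardware generations: ST variants run the bus at f = mclk / (clkdiv + 2), while the plain PL180 uses f = mclk / (2 * (clkdiv + 1)); host->cclk is then recomputed from the rounded divider so the MMC core sees the true rate (359). A worked example, assuming mclk = 100 MHz and a desired 400 kHz enumeration clock:

        /* ST variant:  clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 250 - 2 = 248
         *              cclk   = 100000000 / (248 + 2)               = 400000 Hz
         * Plain PL180: clkdiv = 100000000 / (2 * 400000) - 1        = 124
         *              cclk   = 100000000 / (2 * (124 + 1))         = 400000 Hz
         */

Rounding the ST divider up rather than down guarantees the resulting rate never exceeds the requested frequency.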
374 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
376 writel(0, host->base + MMCICOMMAND);
378 BUG_ON(host->data);
380 host->mrq = NULL;
381 host->cmd = NULL;
383 mmc_request_done(host->mmc, mrq);
385 pm_runtime_mark_last_busy(mmc_dev(host->mmc));
386 pm_runtime_put_autosuspend(mmc_dev(host->mmc));
389 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
391 void __iomem *base = host->base;
393 if (host->singleirq) {
405 static void mmci_stop_data(struct mmci_host *host)
407 mmci_write_datactrlreg(host, 0);
408 mmci_set_mask1(host, 0);
409 host->data = NULL;
412 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
421 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
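Line 421 starts the sg_mapping_iter that the PIO paths (1064, 1112) walk from interrupt context. The flags argument is presumably built from the transfer direction plus SG_MITER_ATOMIC, since the iterator runs under the host lock in the IRQ handler; a sketch under that assumption:

        static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
        {
                unsigned int flags = SG_MITER_ATOMIC;   /* walked from IRQ context */

                if (data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;        /* CPU fills the sg list */
                else
                        flags |= SG_MITER_FROM_SG;      /* CPU drains the sg list */

                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
        }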
430 static void mmci_dma_setup(struct mmci_host *host)
434 struct variant_data *variant = host->variant;
436 host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
437 host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
440 host->next_data.cookie = 1;
451 if (host->dma_rx_channel && !host->dma_tx_channel)
452 host->dma_tx_channel = host->dma_rx_channel;
454 if (host->dma_rx_channel)
455 rxname = dma_chan_name(host->dma_rx_channel);
459 if (host->dma_tx_channel)
460 txname = dma_chan_name(host->dma_tx_channel);
464 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
471 if (host->dma_tx_channel) {
472 struct device *dev = host->dma_tx_channel->device->dev;
475 if (max_seg_size < host->mmc->max_seg_size)
476 host->mmc->max_seg_size = max_seg_size;
478 if (host->dma_rx_channel) {
479 struct device *dev = host->dma_rx_channel->device->dev;
482 if (max_seg_size < host->mmc->max_seg_size)
483 host->mmc->max_seg_size = max_seg_size;
486 if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel)
487 if (dml_hw_init(host, host->mmc->parent->of_node))
495 static inline void mmci_dma_release(struct mmci_host *host)
497 if (host->dma_rx_channel)
498 dma_release_channel(host->dma_rx_channel);
499 if (host->dma_tx_channel)
500 dma_release_channel(host->dma_tx_channel);
501 host->dma_rx_channel = host->dma_tx_channel = NULL;
504 static void mmci_dma_data_error(struct mmci_host *host)
506 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
507 dmaengine_terminate_all(host->dma_current);
508 host->dma_current = NULL;
509 host->dma_desc_current = NULL;
510 host->data->host_cookie = 0;
513 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
520 chan = host->dma_rx_channel;
523 chan = host->dma_tx_channel;
529 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
536 status = readl(host->base + MMCISTATUS);
549 mmci_dma_data_error(host);
555 mmci_dma_unmap(host, data);
562 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
563 mmci_dma_release(host);
566 host->dma_current = NULL;
567 host->dma_desc_current = NULL;
571 static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
575 struct variant_data *variant = host->variant;
577 .src_addr = host->phybase + MMCIFIFO,
578 .dst_addr = host->phybase + MMCIFIFO,
595 chan = host->dma_rx_channel;
599 chan = host->dma_tx_channel;
615 if (host->variant->qcom_dml)
634 static inline int mmci_dma_prep_data(struct mmci_host *host,
638 if (host->dma_current && host->dma_desc_current)
642 return __mmci_dma_prep_data(host, data, &host->dma_current,
643 &host->dma_desc_current);
646 static inline int mmci_dma_prep_next(struct mmci_host *host,
649 struct mmci_host_next *nd = &host->next_data;
650 return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
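Lines 571-650 implement the standard dmaengine slave pattern: both src_addr and dst_addr in the dma_slave_config point at the FIFO (577/578) and the direction decides which one is used, then the sg list is mapped and a slave_sg descriptor is prepared. A condensed, hypothetical sketch of that flow (the function name is invented; error unwinding and burst/width tuning are omitted):

        static int mmci_dma_prep_sketch(struct mmci_host *host, struct mmc_data *data,
                                        struct dma_async_tx_descriptor **desc)
        {
                struct dma_slave_config conf = {
                        .src_addr = host->phybase + MMCIFIFO, /* reads: dev -> mem */
                        .dst_addr = host->phybase + MMCIFIFO, /* writes: mem -> dev */
                };
                bool read = data->flags & MMC_DATA_READ;
                struct dma_chan *chan = read ? host->dma_rx_channel
                                             : host->dma_tx_channel;
                int nr_sg;

                if (!chan)
                        return -EINVAL;

                conf.direction = read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
                dmaengine_slave_config(chan, &conf);

                nr_sg = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
                                   read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
                if (!nr_sg)
                        return -EINVAL;

                *desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, conf.direction,
                                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!*desc)
                        return -ENOMEM;

                return 0;
        }

mmci_dma_start_data() then submits and issues the prepared descriptor (666/667) before enabling the DMA request in the data control register.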
653 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
656 struct mmc_data *data = host->data;
658 ret = mmci_dma_prep_data(host, host->data);
663 dev_vdbg(mmc_dev(host->mmc),
666 dmaengine_submit(host->dma_desc_current);
667 dma_async_issue_pending(host->dma_current);
669 if (host->variant->qcom_dml)
670 dml_start_xfer(host, data);
675 mmci_write_datactrlreg(host, datactrl);
682 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
683 host->base + MMCIMASK0);
687 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
689 struct mmci_host_next *next = &host->next_data;
694 host->dma_desc_current = next->dma_desc;
695 host->dma_current = next->dma_chan;
703 struct mmci_host *host = mmc_priv(mmc);
705 struct mmci_host_next *nd = &host->next_data;
712 if (mmci_validate_data(host, data))
715 if (!mmci_dma_prep_next(host, data))
722 struct mmci_host *host = mmc_priv(mmc);
728 mmci_dma_unmap(host, data);
731 struct mmci_host_next *next = &host->next_data;
734 chan = host->dma_rx_channel;
736 chan = host->dma_tx_channel;
746 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
749 static inline void mmci_dma_setup(struct mmci_host *host)
753 static inline void mmci_dma_release(struct mmci_host *host)
757 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
761 static inline void mmci_dma_finalize(struct mmci_host *host,
766 static inline void mmci_dma_data_error(struct mmci_host *host)
770 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
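When the driver is built without CONFIG_DMA_ENGINE, lines 746-770 replace the helpers above with empty inline stubs so the callers need no #ifdef clutter; mmci_dma_start_data() presumably reports failure so that mmci_start_data() (846-850) falls through to PIO. A sketch of the stub pattern (the exact error code is an assumption):

        /* Blank stubs when the DMA engine framework is not compiled in */
        static inline void mmci_dma_data_error(struct mmci_host *host)
        {
        }

        static inline int mmci_dma_start_data(struct mmci_host *host,
                                              unsigned int datactrl)
        {
                return -ENOSYS; /* forces the PIO fallback in mmci_start_data() */
        }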
780 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
782 struct variant_data *variant = host->variant;
788 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
791 host->data = data;
792 host->size = data->blksz * data->blocks;
795 clks = (unsigned long long)data->timeout_ns * host->cclk;
800 base = host->base;
802 writel(host->size, base + MMCIDATALENGTH);
817 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
829 (host->size < 8 ||
830 (host->size <= 8 && host->mclk > 50000000)))
831 clk = host->clk_reg & ~variant->clkreg_enable;
833 clk = host->clk_reg | variant->clkreg_enable;
835 mmci_write_clkreg(host, clk);
838 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
839 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
846 if (!mmci_dma_start_data(host, datactrl))
850 mmci_init_sg(host, data);
860 if (host->size < variant->fifohalfsize)
870 mmci_write_datactrlreg(host, datactrl);
872 mmci_set_mask1(host, irqmask);
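Line 795 begins the data timeout computation: the card's timeout_ns budget is converted into host clock cycles and programmed next to the byte count. A sketch of that step inside mmci_start_data(), assuming the usual do_div() idiom and the MMCIDATATIMER register:

        unsigned long long clks;
        unsigned int timeout;

        /* timeout in cclk cycles = timeout_ns * cclk / NSEC_PER_SEC */
        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, NSEC_PER_SEC);
        timeout = data->timeout_clks + (unsigned int)clks;

        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);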
876 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
878 void __iomem *base = host->base;
880 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
885 mmci_reg_delay(host);
898 c |= host->variant->data_cmd_enable;
900 host->cmd = cmd;
907 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
920 if (dma_inprogress(host)) {
921 mmci_dma_data_error(host);
922 mmci_dma_unmap(host, data);
928 * on the MMC bus, not on the host side. On reads, this
932 remain = readl(host->base + MMCIDATACNT);
935 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
948 if (success > host->variant->fifosize)
949 success -= host->variant->fifosize;
958 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
961 if (dma_inprogress(host))
962 mmci_dma_finalize(host, data);
963 mmci_stop_data(host);
969 if (!data->stop || host->mrq->sbc) {
970 mmci_request_end(host, data->mrq);
972 mmci_start_command(host, data->stop, 0);
978 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
981 void __iomem *base = host->base;
987 sbc = (cmd == host->mrq->sbc);
988 busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);
990 if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
995 if (host->busy_status && (status & MCI_ST_CARDBUSY))
999 if (!host->busy_status && busy_resp &&
1004 host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
1009 if (host->busy_status) {
1012 host->busy_status = 0;
1015 host->cmd = NULL;
1029 if (host->data) {
1031 if (dma_inprogress(host)) {
1032 mmci_dma_data_error(host);
1033 mmci_dma_unmap(host, host->data);
1035 mmci_stop_data(host);
1037 mmci_request_end(host, host->mrq);
1039 mmci_start_command(host, host->mrq->cmd, 0);
1041 mmci_start_data(host, cmd->data);
1045 static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
1047 return remain - (readl(host->base + MMCIFIFOCNT) << 2);
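The shift at 1047 converts MMCIFIFOCNT's count of 32-bit words still expected into bytes; subtracting that from remain yields how much data is already sitting in the FIFO. The same one-liner, with the arithmetic spelled out in comments:

        static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
        {
                /*
                 * MMCIFIFOCNT counts the 32-bit words still expected for
                 * this transfer; << 2 turns words into bytes, and what
                 * remains of the request minus that is readable right now.
                 */
                return remain - (readl(host->base + MMCIFIFOCNT) << 2);
        }

The Qcom variant at 1050-1057 cannot use this register and instead returns a fixed half-FIFO burst per data-available interrupt.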
1050 static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
1057 return host->variant->fifohalfsize;
1064 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
1066 void __iomem *base = host->base;
1068 u32 status = readl(host->base + MMCISTATUS);
1069 int host_remain = host->size;
1072 int count = host->get_rx_fifocnt(host, status, host_remain);
1112 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
1114 struct variant_data *variant = host->variant;
1115 void __iomem *base = host->base;
1152 struct mmci_host *host = dev_id;
1153 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1154 struct variant_data *variant = host->variant;
1155 void __iomem *base = host->base;
1161 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
1187 len = mmci_pio_read(host, buffer, remain);
1189 len = mmci_pio_write(host, buffer, remain, status);
1193 host->size -= len;
1210 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
1211 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
1219 if (host->size == 0) {
1220 mmci_set_mask1(host, 0);
1232 struct mmci_host *host = dev_id;
1236 spin_lock(&host->lock);
1239 status = readl(host->base + MMCISTATUS);
1241 if (host->singleirq) {
1242 if (status & readl(host->base + MMCIMASK1))
1253 status &= readl(host->base + MMCIMASK0);
1254 writel(status, host->base + MMCICLEAR);
1256 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
1258 if (host->variant->reversed_irq_handling) {
1259 mmci_data_irq(host, host->data, status);
1260 mmci_cmd_irq(host, host->cmd, status);
1262 mmci_cmd_irq(host, host->cmd, status);
1263 mmci_data_irq(host, host->data, status);
1267 if (host->busy_status)
1273 spin_unlock(&host->lock);
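Lines 1232-1273 are the combined command/data interrupt handler. The fragments suggest the familiar shape: loop while status bits remain, demultiplex to the PIO handler when a single IRQ line is shared (1241-1242), clear what was handled (1254), and dispatch cmd/data in the variant's preferred order (1258-1263). A reconstruction under those assumptions (the loop structure and return value are inferred):

        static irqreturn_t mmci_irq(int irq, void *dev_id)
        {
                struct mmci_host *host = dev_id;
                u32 status;
                int ret = 0;

                spin_lock(&host->lock);

                do {
                        status = readl(host->base + MMCISTATUS);

                        if (host->singleirq) {
                                if (status & readl(host->base + MMCIMASK1))
                                        mmci_pio_irq(irq, dev_id);

                                status &= ~MCI_IRQ1MASK; /* PIO bits handled above */
                        }

                        status &= readl(host->base + MMCIMASK0);
                        writel(status, host->base + MMCICLEAR); /* ack what we handle */

                        if (host->variant->reversed_irq_handling) {
                                mmci_data_irq(host, host->data, status);
                                mmci_cmd_irq(host, host->cmd, status);
                        } else {
                                mmci_cmd_irq(host, host->cmd, status);
                                mmci_data_irq(host, host->data, status);
                        }

                        /* don't poll for busy completion in irq context */
                        if (host->busy_status)
                                status &= ~MCI_ST_CARDBUSY;

                        ret = 1;
                } while (status);

                spin_unlock(&host->lock);

                return IRQ_RETVAL(ret);
        }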
1280 struct mmci_host *host = mmc_priv(mmc);
1283 WARN_ON(host->mrq != NULL);
1285 mrq->cmd->error = mmci_validate_data(host, mrq->data);
1293 spin_lock_irqsave(&host->lock, flags);
1295 host->mrq = mrq;
1298 mmci_get_next_data(host, mrq->data);
1301 mmci_start_data(host, mrq->data);
1304 mmci_start_command(host, mrq->sbc, 0);
1306 mmci_start_command(host, mrq->cmd, 0);
1308 spin_unlock_irqrestore(&host->lock, flags);
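Lines 1304 and 1306 show the two possible opening commands of a request: when a set-block-count command (mrq->sbc, i.e. CMD23) is attached it must go on the bus first, and mmci_cmd_irq() later chains to the real command (1039). The dispatch is presumably a plain if/else:

        if (mrq->sbc)
                mmci_start_command(host, mrq->sbc, 0);  /* CMD23 first */
        else
                mmci_start_command(host, mrq->cmd, 0);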
1313 struct mmci_host *host = mmc_priv(mmc);
1314 struct variant_data *variant = host->variant;
1321 if (host->plat->ios_handler &&
1322 host->plat->ios_handler(mmc_dev(mmc), ios))
1330 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
1332 host->vqmmc_enabled = false;
1349 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
1355 host->vqmmc_enabled = true;
1368 pwr |= host->pwr_reg_add;
1379 if (host->hw_designer != AMBA_VENDOR_ST)
1397 if (host->variant->explicit_mclk_control &&
1398 ios->clock != host->clock_cache) {
1399 ret = clk_set_rate(host->clk, ios->clock);
1401 dev_err(mmc_dev(host->mmc),
1404 host->mclk = clk_get_rate(host->clk);
1406 host->clock_cache = ios->clock;
1408 spin_lock_irqsave(&host->lock, flags);
1410 mmci_set_clkreg(host, ios->clock);
1411 mmci_write_pwrreg(host, pwr);
1412 mmci_reg_delay(host);
1414 spin_unlock_irqrestore(&host->lock, flags);
1422 struct mmci_host *host = mmc_priv(mmc);
1423 struct mmci_platform_data *plat = host->plat;
1430 status = plat->status(mmc_dev(host->mmc));
1480 struct mmci_host *host = mmc_priv(mmc);
1487 host->pwr_reg_add |= MCI_ST_DATA0DIREN;
1489 host->pwr_reg_add |= MCI_ST_DATA2DIREN;
1491 host->pwr_reg_add |= MCI_ST_DATA31DIREN;
1493 host->pwr_reg_add |= MCI_ST_DATA74DIREN;
1495 host->pwr_reg_add |= MCI_ST_CMDDIREN;
1497 host->pwr_reg_add |= MCI_ST_FBCLKEN;
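Lines 1487-1497 accumulate ST-specific signal-direction and feedback-clock enables into host->pwr_reg_add, which is later OR'd into the power register (1368). Each bit is presumably gated on a devicetree property; a sketch of two such checks (the property names follow the ST bindings but should be treated as assumptions here):

        if (of_get_property(np, "st,sig-dir-dat0", NULL))
                host->pwr_reg_add |= MCI_ST_DATA0DIREN;
        if (of_get_property(np, "st,sig-pin-fbclk", NULL))
                host->pwr_reg_add |= MCI_ST_FBCLKEN;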
1513 struct mmci_host *host;
1537 host = mmc_priv(mmc);
1538 host->mmc = mmc;
1540 host->hw_designer = amba_manf(dev);
1541 host->hw_revision = amba_rev(dev);
1542 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
1543 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
1545 host->clk = devm_clk_get(&dev->dev, NULL);
1546 if (IS_ERR(host->clk)) {
1547 ret = PTR_ERR(host->clk);
1551 ret = clk_prepare_enable(host->clk);
1556 host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
1558 host->get_rx_fifocnt = mmci_get_rx_fifocnt;
1560 host->plat = plat;
1561 host->variant = variant;
1562 host->mclk = clk_get_rate(host->clk);
1568 if (host->mclk > variant->f_max) {
1569 ret = clk_set_rate(host->clk, variant->f_max);
1572 host->mclk = clk_get_rate(host->clk);
1574 host->mclk);
1577 host->phybase = dev->res.start;
1578 host->base = devm_ioremap_resource(&dev->dev, &dev->res);
1579 if (IS_ERR(host->base)) {
1580 ret = PTR_ERR(host->base);
1591 mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
1593 mmc->f_min = clk_round_rate(host->clk, 100000);
1595 mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
1605 min(host->mclk, mmc->f_max);
1608 fmax : min(host->mclk, fmax);
1632 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
1671 spin_lock_init(&host->lock);
1673 writel(0, host->base + MMCIMASK0);
1674 writel(0, host->base + MMCIMASK1);
1675 writel(0xfff, host->base + MMCICLEAR);
1709 DRIVER_NAME " (cmd)", host);
1714 host->singleirq = true;
1717 IRQF_SHARED, DRIVER_NAME " (pio)", host);
1722 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
1731 mmci_dma_setup(host);
1742 clk_disable_unprepare(host->clk);
1753 struct mmci_host *host = mmc_priv(mmc);
1763 writel(0, host->base + MMCIMASK0);
1764 writel(0, host->base + MMCIMASK1);
1766 writel(0, host->base + MMCICOMMAND);
1767 writel(0, host->base + MMCIDATACTRL);
1769 mmci_dma_release(host);
1770 clk_disable_unprepare(host->clk);
1778 static void mmci_save(struct mmci_host *host)
1782 spin_lock_irqsave(&host->lock, flags);
1784 writel(0, host->base + MMCIMASK0);
1785 if (host->variant->pwrreg_nopower) {
1786 writel(0, host->base + MMCIDATACTRL);
1787 writel(0, host->base + MMCIPOWER);
1788 writel(0, host->base + MMCICLOCK);
1790 mmci_reg_delay(host);
1792 spin_unlock_irqrestore(&host->lock, flags);
1795 static void mmci_restore(struct mmci_host *host)
1799 spin_lock_irqsave(&host->lock, flags);
1801 if (host->variant->pwrreg_nopower) {
1802 writel(host->clk_reg, host->base + MMCICLOCK);
1803 writel(host->datactrl_reg, host->base + MMCIDATACTRL);
1804 writel(host->pwr_reg, host->base + MMCIPOWER);
1806 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
1807 mmci_reg_delay(host);
1809 spin_unlock_irqrestore(&host->lock, flags);
1818 struct mmci_host *host = mmc_priv(mmc);
1820 mmci_save(host);
1821 clk_disable_unprepare(host->clk);
1833 struct mmci_host *host = mmc_priv(mmc);
1834 clk_prepare_enable(host->clk);
1835 mmci_restore(host);
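The save/restore pair (1778-1809) exists for variants flagged pwrreg_nopower, whose power, clock, and datactrl registers lose state when the block is unpowered: suspend masks interrupts and zeroes them, and resume replays the cached host->clk_reg/pwr_reg/datactrl_reg values, which is precisely why the write-if-changed helpers keep those caches. Lines 1818-1835 show the runtime PM callbacks driving the pair; a sketch of how they plausibly wrap it (the pinctrl calls are an assumption):

        static int mmci_runtime_suspend(struct device *dev)
        {
                struct amba_device *adev = to_amba_device(dev);
                struct mmc_host *mmc = amba_get_drvdata(adev);

                if (mmc) {
                        struct mmci_host *host = mmc_priv(mmc);

                        pinctrl_pm_select_sleep_state(dev);
                        mmci_save(host);                  /* mask IRQs, drop regs */
                        clk_disable_unprepare(host->clk); /* gate the bus clock */
                }

                return 0;
        }

        static int mmci_runtime_resume(struct device *dev)
        {
                struct amba_device *adev = to_amba_device(dev);
                struct mmc_host *mmc = amba_get_drvdata(adev);

                if (mmc) {
                        struct mmci_host *host = mmc_priv(mmc);

                        clk_prepare_enable(host->clk);
                        mmci_restore(host);               /* replay cached registers */
                        pinctrl_pm_select_default_state(dev);
                }

                return 0;
        }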