Lines matching refs: host
Cross-reference listing for the identifier "host" in drivers/mmc/host/sh_mmcif.c (the Linux SuperH MMCIF MMC host controller driver); each entry below carries its line number in that file.

53 #include <linux/mmc/host.h>
260 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
263 writel(val | readl(host->addr + reg), host->addr + reg);
266 static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
269 writel(~val & readl(host->addr + reg), host->addr + reg);
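The pair of helpers at 260-269 define the driver's only register-access idiom: sh_mmcif_bitset() ORs bits into a 32-bit MMIO register, sh_mmcif_bitclr() ANDs with the complement to clear them. A minimal user-space sketch of the same read-modify-write pattern, with the register replaced by a plain variable and readl()/writel() stubbed out (every name and value below is a stand-in, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's readl()/writel() over a fake register. */
static uint32_t fake_reg;
static uint32_t readl_stub(void)          { return fake_reg; }
static void     writel_stub(uint32_t val) { fake_reg = val; }

/* OR bits into the register, as sh_mmcif_bitset() does. */
static void bitset(uint32_t val) { writel_stub(val | readl_stub()); }

/* Clear bits by ANDing with the complement, as sh_mmcif_bitclr() does. */
static void bitclr(uint32_t val) { writel_stub(~val & readl_stub()); }

int main(void)
{
	fake_reg = 0x10;
	bitset(0x01);   /* reg becomes 0x11 */
	bitclr(0x10);   /* reg becomes 0x01 */
	printf("reg = 0x%08x\n", (unsigned)fake_reg);
	return 0;
}

Every sh_mmcif_bitset()/sh_mmcif_bitclr() call later in this listing is this pattern applied to a particular register and mask.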
274 struct sh_mmcif_host *host = arg;
275 struct mmc_request *mrq = host->mrq;
277 dev_dbg(&host->pd->dev, "Command completed\n");
280 dev_name(&host->pd->dev)))
283 complete(&host->dma_complete);
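The callback at 274-283 signals DMA completion by completing host->dma_complete, which sh_mmcif_end_cmd() later waits on with a timeout (lines 1112 and 1128-1129 below). The same signal-and-timed-wait shape can be modelled in user space with a condition variable; this is an analogy for the kernel's completion API, not its implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* User-space model of a kernel completion: a flag plus a condition
 * variable, with a timed wait standing in for
 * wait_for_completion_interruptible_timeout(). */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static bool wait_timeout(struct completion *c, int secs)
{
	struct timespec ts;
	bool done;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&c->lock);
	while (!c->done && pthread_cond_timedwait(&c->cond, &c->lock, &ts) == 0)
		;
	done = c->done;
	pthread_mutex_unlock(&c->lock);
	return done;
}

static struct completion dma_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};

static void *fake_dma(void *arg)
{
	(void)arg;
	usleep(100 * 1000);   /* pretend the transfer takes 100 ms */
	complete(&dma_done);  /* the role mmcif_dma_complete() plays */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fake_dma, NULL);
	printf(wait_timeout(&dma_done, 1) ? "DMA done\n" : "DMA timeout\n");
	pthread_join(&t, NULL);
	return 0;
}

Build with cc -pthread; the waiter reports a timeout if the "transfer" thread never calls complete(), which is the failure path the driver handles at 1146 below.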
286 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
288 struct mmc_data *data = host->mrq->data;
291 struct dma_chan *chan = host->chan_rx;
298 host->dma_active = true;
305 desc->callback_param = host;
307 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
310 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
317 host->chan_rx = NULL;
318 host->dma_active = false;
321 chan = host->chan_tx;
323 host->chan_tx = NULL;
326 dev_warn(&host->pd->dev,
328 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
331 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
335 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
337 struct mmc_data *data = host->mrq->data;
340 struct dma_chan *chan = host->chan_tx;
347 host->dma_active = true;
354 desc->callback_param = host;
356 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
359 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
366 host->chan_tx = NULL;
367 host->dma_active = false;
370 chan = host->chan_rx;
372 host->chan_rx = NULL;
375 dev_warn(&host->pd->dev,
377 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
380 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
385 sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
406 (void *)(unsigned long)slave_id, &host->pd->dev,
409 dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
415 res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
438 static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
441 host->dma_active = false;
446 } else if (!host->pd->dev.of_node) {
451 host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
452 if (!host->chan_tx)
455 host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
456 if (!host->chan_rx) {
457 dma_release_channel(host->chan_tx);
458 host->chan_tx = NULL;
462 static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
464 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
466 if (host->chan_tx) {
467 struct dma_chan *chan = host->chan_tx;
468 host->chan_tx = NULL;
471 if (host->chan_rx) {
472 struct dma_chan *chan = host->chan_rx;
473 host->chan_rx = NULL;
477 host->dma_active = false;
480 static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
482 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
485 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
486 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
490 if (sup_pclk && clk == host->clk)
491 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
493 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
494 ((fls(DIV_ROUND_UP(host->clk,
497 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
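sh_mmcif_clock_control() (480-497) gates the clock off, then programs a power-of-two divider: the field value is fls(DIV_ROUND_UP(host->clk, clk)) - 1, shifted to bit 16 and masked with CLK_CLEAR. A standalone sketch of just that computation (the bus clock value is illustrative, and how the hardware maps the field value to an exact division factor is a datasheet detail not asserted here):

#include <stdio.h>

/* Kernel-style helpers modelled in user space. */
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* fls(): 1-based position of the most-significant set bit; fls(0) == 0. */
static int fls_stub(unsigned x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned bus_clk = 104000000;  /* illustrative host->clk value */
	unsigned targets[] = { 400000, 25000000, 52000000 };

	for (unsigned i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		/* The value the driver shifts to bit 16 and masks with
		 * CLK_CLEAR; the hardware turns it into a power-of-two
		 * division of the bus clock. */
		int n = fls_stub(DIV_ROUND_UP(bus_clk, targets[i])) - 1;
		printf("target %8u Hz -> divisor field %d\n", targets[i], n);
	}
	return 0;
}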
500 static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
504 tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);
506 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
507 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
508 if (host->ccs_enable)
510 if (host->clk_ctrl2_enable)
511 sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
512 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
515 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
518 static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
523 host->sd_error = false;
525 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
526 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
527 dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
528 dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
531 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
532 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
534 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
540 dev_err(&host->pd->dev,
544 sh_mmcif_sync_reset(host);
545 dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
550 dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
551 host->state, host->wait_for);
554 dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
555 host->state, host->wait_for);
558 dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
559 host->state, host->wait_for);
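sh_mmcif_error_manage() (518-559) snapshots HOST_STS1/HOST_STS2, forces a command break or a full sync reset, and then classifies the failure as a CRC error, a timeout, or an end/index error, returning an errno-style code its callers store in cmd->error or data->error. A heavily simplified sketch of only the classification step; the status-bit masks and the specific error codes chosen here are placeholders, not the driver's real STS2_* definitions:

#include <errno.h>
#include <stdio.h>

/* Placeholder status bits; the real STS2_* masks live in the driver
 * header and are not reproduced here. */
#define FAKE_STS2_CRC_ERR     (1u << 0)
#define FAKE_STS2_TIMEOUT_ERR (1u << 1)

/* Map a HOST_STS2 snapshot to an errno-style code, mirroring the
 * three dev_err()/dev_dbg() branches at 550-559. */
static int classify(unsigned state2)
{
	if (state2 & FAKE_STS2_CRC_ERR)
		return -EILSEQ;      /* illustrative choice of code */
	if (state2 & FAKE_STS2_TIMEOUT_ERR)
		return -ETIMEDOUT;
	return -EIO;                 /* end/index and anything else */
}

int main(void)
{
	printf("crc -> %d, timeout -> %d, other -> %d\n",
	       classify(FAKE_STS2_CRC_ERR),
	       classify(FAKE_STS2_TIMEOUT_ERR),
	       classify(0));
	return 0;
}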
565 static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
567 struct mmc_data *data = host->mrq->data;
569 host->sg_blkidx += host->blocksize;
571 /* data->sg->length must be a multiple of host->blocksize? */
572 BUG_ON(host->sg_blkidx > data->sg->length);
574 if (host->sg_blkidx == data->sg->length) {
575 host->sg_blkidx = 0;
576 if (++host->sg_idx < data->sg_len)
577 host->pio_ptr = sg_virt(++data->sg);
579 host->pio_ptr = p;
582 return host->sg_idx != data->sg_len;
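sh_mmcif_next_block() (565-582) is the PIO cursor: it advances sg_blkidx by one block within the current scatterlist segment, hops to the next segment when the current one is exhausted, and returns true while data remains. A self-contained model with segment lengths as a plain array (the struct and names are illustrative, not the kernel scatterlist API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the driver's PIO cursor. */
struct pio_cursor {
	unsigned blocksize;   /* bytes per transferred block */
	unsigned sg_idx;      /* current segment             */
	unsigned sg_blkidx;   /* byte offset inside segment  */
};

/* Advance one block; true while more data remains. */
static bool next_block(struct pio_cursor *c,
		       const unsigned *seg_len, unsigned nsegs)
{
	c->sg_blkidx += c->blocksize;

	/* Segment length is assumed to be a multiple of blocksize,
	 * mirroring the BUG_ON() at 572. */
	if (c->sg_blkidx == seg_len[c->sg_idx]) {
		c->sg_blkidx = 0;
		c->sg_idx++;
	}
	return c->sg_idx != nsegs;
}

int main(void)
{
	const unsigned seg_len[] = { 1024, 512 };   /* two segments */
	struct pio_cursor c = { .blocksize = 512 };

	do {
		printf("block at sg[%u]+%u\n", c.sg_idx, c.sg_blkidx);
	} while (next_block(&c, seg_len, 2));
	return 0;
}

Run as-is this walks three 512-byte blocks across the two segments, the same traversal the multi-block read/write handlers below perform one interrupt at a time.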
585 static void sh_mmcif_single_read(struct sh_mmcif_host *host,
588 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
591 host->wait_for = MMCIF_WAIT_FOR_READ;
594 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
597 static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
599 struct mmc_data *data = host->mrq->data;
603 if (host->sd_error) {
604 data->error = sh_mmcif_error_manage(host);
605 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
609 for (i = 0; i < host->blocksize / 4; i++)
610 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
613 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
614 host->wait_for = MMCIF_WAIT_FOR_READ_END;
619 static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
627 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
630 host->wait_for = MMCIF_WAIT_FOR_MREAD;
631 host->sg_idx = 0;
632 host->sg_blkidx = 0;
633 host->pio_ptr = sg_virt(data->sg);
635 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
638 static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
640 struct mmc_data *data = host->mrq->data;
641 u32 *p = host->pio_ptr;
644 if (host->sd_error) {
645 data->error = sh_mmcif_error_manage(host);
646 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
652 for (i = 0; i < host->blocksize / 4; i++)
653 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
655 if (!sh_mmcif_next_block(host, p))
658 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
663 static void sh_mmcif_single_write(struct sh_mmcif_host *host,
666 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
669 host->wait_for = MMCIF_WAIT_FOR_WRITE;
672 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
675 static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
677 struct mmc_data *data = host->mrq->data;
681 if (host->sd_error) {
682 data->error = sh_mmcif_error_manage(host);
683 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
687 for (i = 0; i < host->blocksize / 4; i++)
688 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
691 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
692 host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
697 static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
705 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
708 host->wait_for = MMCIF_WAIT_FOR_MWRITE;
709 host->sg_idx = 0;
710 host->sg_blkidx = 0;
711 host->pio_ptr = sg_virt(data->sg);
713 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
716 static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
718 struct mmc_data *data = host->mrq->data;
719 u32 *p = host->pio_ptr;
722 if (host->sd_error) {
723 data->error = sh_mmcif_error_manage(host);
724 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
730 for (i = 0; i < host->blocksize / 4; i++)
731 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
733 if (!sh_mmcif_next_block(host, p))
736 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
741 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
745 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
746 cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
747 cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
748 cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
750 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
753 static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
756 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
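sh_mmcif_get_response() (741-750) reads a long (136-bit, R2-style) response from RESP3 down to RESP0, so resp[0] ends up holding the most-significant word; short responses read RESP0 alone, and CMD12's response has its own register (756). A toy illustration of that reversed read order, with made-up register contents:

#include <stdint.h>
#include <stdio.h>

/* Made-up RESP0..RESP3 register contents, least-significant word first. */
static const uint32_t resp_regs[4] = {
	0x00112233,   /* RESP0 */
	0x44556677,   /* RESP1 */
	0x8899aabb,   /* RESP2 */
	0xccddeeff,   /* RESP3 */
};

int main(void)
{
	uint32_t resp[4];

	/* Long responses: read RESP3 first, as sh_mmcif_get_response()
	 * does, so resp[0] holds the most-significant word. */
	for (int i = 0; i < 4; i++)
		resp[i] = resp_regs[3 - i];

	for (int i = 0; i < 4; i++)
		printf("resp[%d] = 0x%08x\n", i, (unsigned)resp[i]);
	return 0;
}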
759 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
781 dev_err(&host->pd->dev, "Unsupported response type.\n");
798 switch (host->bus_width) {
809 dev_err(&host->pd->dev, "Unsupported bus width.\n");
812 switch (host->timing) {
815 * MMC core will only set this timing if the host
831 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
849 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
854 sh_mmcif_multi_read(host, mrq);
857 sh_mmcif_multi_write(host, mrq);
860 sh_mmcif_single_write(host, mrq);
864 sh_mmcif_single_read(host, mrq);
867 dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
872 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
894 if (host->ccs_enable)
898 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
899 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
902 opc = sh_mmcif_set_cmd(host, mrq);
904 if (host->ccs_enable)
905 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
907 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
908 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
910 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
912 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
914 host->wait_for = MMCIF_WAIT_FOR_CMD;
915 schedule_delayed_work(&host->timeout_work, host->timeout);
918 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
923 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
926 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
929 dev_err(&host->pd->dev, "unsupported stop cmd\n");
930 mrq->stop->error = sh_mmcif_error_manage(host);
934 host->wait_for = MMCIF_WAIT_FOR_STOP;
939 struct sh_mmcif_host *host = mmc_priv(mmc);
942 spin_lock_irqsave(&host->lock, flags);
943 if (host->state != STATE_IDLE) {
944 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
945 spin_unlock_irqrestore(&host->lock, flags);
951 host->state = STATE_REQUEST;
952 spin_unlock_irqrestore(&host->lock, flags);
962 host->state = STATE_IDLE;
970 host->mrq = mrq;
972 sh_mmcif_start_cmd(host, mrq);
975 static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
977 int ret = clk_prepare_enable(host->hclk);
980 host->clk = clk_get_rate(host->hclk);
981 host->mmc->f_max = host->clk / 2;
982 host->mmc->f_min = host->clk / 512;
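sh_mmcif_clk_update() (975-982) enables the bus clock and derives the host's usable frequency window from its rate: f_max = clk/2 and f_min = clk/512, the endpoints of the power-of-two divider range programmed by sh_mmcif_clock_control() above. Worked with an assumed 104 MHz bus clock (the rate itself is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long clk = 104000000;   /* illustrative bus clock rate */

	/* /2 at the fast end, /512 at the slow end. */
	printf("f_max = %lu Hz\n", clk / 2);     /* 52000000 Hz */
	printf("f_min = %lu Hz\n", clk / 512);   /*   203125 Hz */
	return 0;
}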
988 static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
990 struct mmc_host *mmc = host->mmc;
1000 struct sh_mmcif_host *host = mmc_priv(mmc);
1003 spin_lock_irqsave(&host->lock, flags);
1004 if (host->state != STATE_IDLE) {
1005 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
1006 spin_unlock_irqrestore(&host->lock, flags);
1010 host->state = STATE_IOS;
1011 spin_unlock_irqrestore(&host->lock, flags);
1014 if (!host->card_present) {
1016 sh_mmcif_request_dma(host, host->pd->dev.platform_data);
1017 host->card_present = true;
1019 sh_mmcif_set_power(host, ios);
1022 sh_mmcif_clock_control(host, 0);
1024 if (host->card_present) {
1025 sh_mmcif_release_dma(host);
1026 host->card_present = false;
1029 if (host->power) {
1030 pm_runtime_put_sync(&host->pd->dev);
1031 clk_disable_unprepare(host->hclk);
1032 host->power = false;
1034 sh_mmcif_set_power(host, ios);
1036 host->state = STATE_IDLE;
1041 if (!host->power) {
1042 sh_mmcif_clk_update(host);
1043 pm_runtime_get_sync(&host->pd->dev);
1044 host->power = true;
1045 sh_mmcif_sync_reset(host);
1047 sh_mmcif_clock_control(host, ios->clock);
1050 host->timing = ios->timing;
1051 host->bus_width = ios->bus_width;
1052 host->state = STATE_IDLE;
1057 struct sh_mmcif_host *host = mmc_priv(mmc);
1058 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
1067 return p->get_cd(host->pd);
1076 static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
1078 struct mmc_command *cmd = host->mrq->cmd;
1079 struct mmc_data *data = host->mrq->data;
1082 if (host->sd_error) {
1090 cmd->error = sh_mmcif_error_manage(host);
1093 dev_dbg(&host->pd->dev, "CMD%d error %d\n",
1095 host->sd_error = false;
1103 sh_mmcif_get_response(host, cmd);
1112 init_completion(&host->dma_complete);
1115 if (host->chan_rx)
1116 sh_mmcif_start_dma_rx(host);
1118 if (host->chan_tx)
1119 sh_mmcif_start_dma_tx(host);
1122 if (!host->dma_active) {
1123 data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
1128 time = wait_for_completion_interruptible_timeout(&host->dma_complete,
1129 host->timeout);
1132 dma_unmap_sg(host->chan_rx->device->dev,
1136 dma_unmap_sg(host->chan_tx->device->dev,
1140 if (host->sd_error) {
1141 dev_err(host->mmc->parent,
1144 data->error = sh_mmcif_error_manage(host);
1146 dev_err(host->mmc->parent, "DMA timeout!\n");
1149 dev_err(host->mmc->parent,
1153 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
1155 host->dma_active = false;
1161 dmaengine_terminate_all(host->chan_rx);
1163 dmaengine_terminate_all(host->chan_tx);
1171 struct sh_mmcif_host *host = dev_id;
1175 cancel_delayed_work_sync(&host->timeout_work);
1177 mutex_lock(&host->thread_lock);
1179 mrq = host->mrq;
1181 dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
1182 host->state, host->wait_for);
1183 mutex_unlock(&host->thread_lock);
1191 switch (host->wait_for) {
1194 mutex_unlock(&host->thread_lock);
1198 wait = sh_mmcif_end_cmd(host);
1202 wait = sh_mmcif_mread_block(host);
1206 wait = sh_mmcif_read_block(host);
1210 wait = sh_mmcif_mwrite_block(host);
1214 wait = sh_mmcif_write_block(host);
1217 if (host->sd_error) {
1218 mrq->stop->error = sh_mmcif_error_manage(host);
1219 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
1222 sh_mmcif_get_cmd12response(host, mrq->stop);
1227 if (host->sd_error) {
1228 mrq->data->error = sh_mmcif_error_manage(host);
1229 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
1237 schedule_delayed_work(&host->timeout_work, host->timeout);
1239 mutex_unlock(&host->thread_lock);
1243 if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
1250 sh_mmcif_stop_cmd(host, mrq);
1252 schedule_delayed_work(&host->timeout_work, host->timeout);
1253 mutex_unlock(&host->thread_lock);
1259 host->wait_for = MMCIF_WAIT_FOR_REQUEST;
1260 host->state = STATE_IDLE;
1261 host->mrq = NULL;
1262 mmc_request_done(host->mmc, mrq);
1264 mutex_unlock(&host->thread_lock);
1271 struct sh_mmcif_host *host = dev_id;
1274 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
1275 mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
1276 if (host->ccs_enable)
1277 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
1279 sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
1280 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
1283 dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
1287 host->sd_error = true;
1288 dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
1291 if (!host->mrq)
1292 dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
1293 if (!host->dma_active)
1295 else if (host->sd_error)
1296 mmcif_dma_complete(host);
1298 dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
1307 struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
1308 struct mmc_request *mrq = host->mrq;
1311 if (host->dying)
1315 dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
1316 host->wait_for, mrq->cmd->opcode);
1318 spin_lock_irqsave(&host->lock, flags);
1319 if (host->state == STATE_IDLE) {
1320 spin_unlock_irqrestore(&host->lock, flags);
1324 host->state = STATE_TIMEOUT;
1325 spin_unlock_irqrestore(&host->lock, flags);
1331 switch (host->wait_for) {
1333 mrq->cmd->error = sh_mmcif_error_manage(host);
1336 mrq->stop->error = sh_mmcif_error_manage(host);
1344 mrq->data->error = sh_mmcif_error_manage(host);
1350 host->state = STATE_IDLE;
1351 host->wait_for = MMCIF_WAIT_FOR_REQUEST;
1352 host->mrq = NULL;
1353 mmc_request_done(host->mmc, mrq);
1356 static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
1358 struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
1359 struct mmc_host *mmc = host->mmc;
1376 struct sh_mmcif_host *host;
1402 host = mmc_priv(mmc);
1403 host->mmc = mmc;
1404 host->addr = reg;
1405 host->timeout = msecs_to_jiffies(1000);
1406 host->ccs_enable = !pd || !pd->ccs_unsupported;
1407 host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
1409 host->pd = pdev;
1411 spin_lock_init(&host->lock);
1414 sh_mmcif_init_ocr(host);
1425 platform_set_drvdata(pdev, host);
1428 host->power = false;
1430 host->hclk = devm_clk_get(&pdev->dev, NULL);
1431 if (IS_ERR(host->hclk)) {
1432 ret = PTR_ERR(host->hclk);
1436 ret = sh_mmcif_clk_update(host);
1444 INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
1446 sh_mmcif_sync_reset(host);
1447 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1451 sh_mmcif_irqt, 0, name, host);
1459 0, "sh_mmc:int", host);
1472 mutex_init(&host->thread_lock);
1481 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
1482 clk_get_rate(host->hclk) / 1000000UL);
1484 clk_disable_unprepare(host->hclk);
1488 clk_disable_unprepare(host->hclk);
1498 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1500 host->dying = true;
1501 clk_prepare_enable(host->hclk);
1506 mmc_remove_host(host->mmc);
1507 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1514 cancel_delayed_work_sync(&host->timeout_work);
1516 clk_disable_unprepare(host->hclk);
1517 mmc_free_host(host->mmc);
1527 struct sh_mmcif_host *host = dev_get_drvdata(dev);
1529 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);