Lines matching refs:host in drivers/mmc/host/omap_hsmmc.c

2  * drivers/mmc/host/omap_hsmmc.c
36 #include <linux/mmc/host.h>
158 #define mmc_slot(host) (host->pdata->slots[host->slot_id])
231 static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);
235 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
236 struct omap_mmc_platform_data *mmc = host->pdata;
244 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
245 struct omap_mmc_platform_data *mmc = host->pdata;
253 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
254 struct omap_mmc_platform_data *mmc = host->pdata;
264 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
265 struct omap_mmc_platform_data *mmc = host->pdata;
273 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
274 struct omap_mmc_platform_data *mmc = host->pdata;
292 struct omap_hsmmc_host *host =
300 if (!host->vcc)
303 if (mmc_slot(host).before_set_reg)
304 mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);
306 if (host->pbias) {
307 if (host->pbias_enabled == 1) {
308 ret = regulator_disable(host->pbias);
310 host->pbias_enabled = 0;
312 regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
329 if (host->vcc)
330 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
332 if (ret == 0 && host->vcc_aux) {
333 ret = regulator_enable(host->vcc_aux);
334 if (ret < 0 && host->vcc)
335 ret = mmc_regulator_set_ocr(host->mmc,
336 host->vcc, 0);
340 if (host->vcc_aux)
341 ret = regulator_disable(host->vcc_aux);
342 if (host->vcc) {
344 ret = mmc_regulator_set_ocr(host->mmc,
345 host->vcc, 0);
349 if (host->pbias) {
351 ret = regulator_set_voltage(host->pbias, VDD_1V8,
354 ret = regulator_set_voltage(host->pbias, VDD_3V0,
359 if (host->pbias_enabled == 0) {
360 ret = regulator_enable(host->pbias);
362 host->pbias_enabled = 1;
366 if (mmc_slot(host).after_set_reg)
367 mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);
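
The two branches listed above follow the usual ordering for this driver: on power-on the main vmmc supply is programmed through mmc_regulator_set_ocr() before the auxiliary supply is enabled, and on power-off the auxiliary supply is dropped first. A minimal sketch of that sequence, with the pbias handling and the before/after_set_reg hooks left out; the helper name is made up for illustration:

    /* Sketch only: vcc first on the way up, vcc_aux first on the way down. */
    static int hsmmc_set_power_sketch(struct omap_hsmmc_host *host,
                                      int power_on, int vdd)
    {
            int ret = 0;

            if (power_on) {
                    if (host->vcc)
                            ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
                    if (ret == 0 && host->vcc_aux) {
                            ret = regulator_enable(host->vcc_aux);
                            if (ret < 0 && host->vcc)   /* roll vcc back on failure */
                                    ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
                    }
            } else {
                    if (host->vcc_aux)
                            ret = regulator_disable(host->vcc_aux);
                    if (host->vcc)
                            ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
            }
            return ret;
    }
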
373 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
378 reg = devm_regulator_get(host->dev, "vmmc");
380 dev_err(host->dev, "unable to get vmmc regulator %ld\n",
384 host->vcc = reg;
386 if (!mmc_slot(host).ocr_mask) {
387 mmc_slot(host).ocr_mask = ocr_value;
389 if (!(mmc_slot(host).ocr_mask & ocr_value)) {
390 dev_err(host->dev, "ocrmask %x is not supported\n",
391 mmc_slot(host).ocr_mask);
392 mmc_slot(host).ocr_mask = 0;
397 mmc_slot(host).set_power = omap_hsmmc_set_power;
400 reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
401 host->vcc_aux = IS_ERR(reg) ? NULL : reg;
403 reg = devm_regulator_get_optional(host->dev, "pbias");
404 host->pbias = IS_ERR(reg) ? NULL : reg;
407 if (mmc_slot(host).no_regulator_off_init)
413 if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
414 (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
415 int vdd = ffs(mmc_slot(host).ocr_mask) - 1;
417 mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd);
418 mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
424 static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
426 mmc_slot(host).set_power = NULL;
436 static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
441 static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
506 static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
508 OMAP_HSMMC_WRITE(host->base, SYSCTL,
509 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
515 static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
517 OMAP_HSMMC_WRITE(host->base, SYSCTL,
518 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
519 if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
520 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
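
Every OMAP_HSMMC_READ()/OMAP_HSMMC_WRITE() pair in these matches is a read-modify-write of a 32-bit controller register at a fixed offset from host->base; the CEN handling just above is the canonical example. The macro bodies are not among the matched lines, but they are essentially thin readl()/writel() wrappers; a sketch under that assumption:

    /* Sketch: register accessors keyed by OMAP_HSMMC_<reg> offset macros. */
    #define OMAP_HSMMC_READ(base, reg) \
            readl((base) + OMAP_HSMMC_##reg)
    #define OMAP_HSMMC_WRITE(base, reg, val) \
            writel((val), (base) + OMAP_HSMMC_##reg)
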
523 static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
529 if (host->use_dma)
536 spin_lock_irqsave(&host->irq_lock, flags);
537 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
538 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
541 if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
543 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
544 spin_unlock_irqrestore(&host->irq_lock, flags);
547 static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
552 spin_lock_irqsave(&host->irq_lock, flags);
554 if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
556 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
557 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
558 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
559 spin_unlock_irqrestore(&host->irq_lock, flags);
563 static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
568 dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
576 static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
578 struct mmc_ios *ios = &host->mmc->ios;
583 dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
585 omap_hsmmc_stop_clock(host);
587 regval = OMAP_HSMMC_READ(host->base, SYSCTL);
589 clkdiv = calc_divisor(host, ios);
591 OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
592 OMAP_HSMMC_WRITE(host->base, SYSCTL,
593 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
597 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
610 if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) &&
612 ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
613 regval = OMAP_HSMMC_READ(host->base, HCTL);
614 if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
619 OMAP_HSMMC_WRITE(host->base, HCTL, regval);
622 omap_hsmmc_start_clock(host);
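
calc_divisor() and omap_hsmmc_set_clock() follow the standard SD-host clocking sequence: stop the card clock, compute a divider for the requested ios->clock from the functional clock, program it with the internal clock disabled, wait for ICS to report the internal clock stable, then re-enable the card clock (and optionally HSPE for rates above 25 MHz). A sketch of the divider math, assuming a CLKD_MAX-style cap for the 10-bit CLKD field:

    /* Sketch: smallest divider that keeps the card clock at or below ios->clock. */
    static u16 calc_divisor_sketch(unsigned long fclk_rate, unsigned int clock)
    {
            u16 dsor = 0;

            if (clock) {
                    dsor = DIV_ROUND_UP(fclk_rate, clock);
                    if (dsor > CLKD_MAX)            /* assumed 0x3FF, the field width */
                            dsor = CLKD_MAX;
            }
            return dsor;
    }

With a 96 MHz functional clock and the 400 kHz identification rate this gives a divider of 240.
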
625 static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
627 struct mmc_ios *ios = &host->mmc->ios;
630 con = OMAP_HSMMC_READ(host->base, CON);
637 OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
640 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
641 OMAP_HSMMC_WRITE(host->base, HCTL,
642 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
645 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
646 OMAP_HSMMC_WRITE(host->base, HCTL,
647 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
652 static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
654 struct mmc_ios *ios = &host->mmc->ios;
657 con = OMAP_HSMMC_READ(host->base, CON);
659 OMAP_HSMMC_WRITE(host->base, CON, con | OD);
661 OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
667 * Restore the MMC host context, if it was lost as result of a
670 static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
672 struct mmc_ios *ios = &host->mmc->ios;
676 if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
677 host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
678 host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
679 host->capa == OMAP_HSMMC_READ(host->base, CAPA))
682 host->context_loss++;
684 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
685 if (host->power_mode != MMC_POWER_OFF &&
696 if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
699 OMAP_HSMMC_WRITE(host->base, HCTL,
700 OMAP_HSMMC_READ(host->base, HCTL) | hctl);
702 OMAP_HSMMC_WRITE(host->base, CAPA,
703 OMAP_HSMMC_READ(host->base, CAPA) | capa);
705 OMAP_HSMMC_WRITE(host->base, HCTL,
706 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
709 while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
713 OMAP_HSMMC_WRITE(host->base, ISE, 0);
714 OMAP_HSMMC_WRITE(host->base, IE, 0);
715 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
718 if (host->power_mode == MMC_POWER_OFF)
721 omap_hsmmc_set_bus_width(host);
723 omap_hsmmc_set_clock(host);
725 omap_hsmmc_set_bus_mode(host);
728 dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
729 host->context_loss);
734 * Save the MMC host context (store the number of power state changes so far).
736 static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
738 host->con = OMAP_HSMMC_READ(host->base, CON);
739 host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
740 host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL);
741 host->capa = OMAP_HSMMC_READ(host->base, CAPA);
746 static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
751 static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
761 static void send_init_stream(struct omap_hsmmc_host *host)
766 if (host->protect_card)
769 disable_irq(host->irq);
771 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
772 OMAP_HSMMC_WRITE(host->base, CON,
773 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
774 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
778 reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;
780 OMAP_HSMMC_WRITE(host->base, CON,
781 OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);
783 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
784 OMAP_HSMMC_READ(host->base, STAT);
786 enable_irq(host->irq);
790 int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
794 if (mmc_slot(host).get_cover_state)
795 r = mmc_slot(host).get_cover_state(host->dev, host->slot_id);
804 struct omap_hsmmc_host *host = mmc_priv(mmc);
807 omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
817 struct omap_hsmmc_host *host = mmc_priv(mmc);
819 return sprintf(buf, "%s\n", mmc_slot(host).name);
828 omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
833 dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
834 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
835 host->cmd = cmd;
837 omap_hsmmc_enable_irq(host, cmd);
839 host->response_busy = 0;
845 host->response_busy = 1;
855 if (cmd == host->mrq->stop)
860 if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
861 host->mrq->sbc) {
863 OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
873 if (host->use_dma)
876 host->req_in_progress = 1;
878 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
879 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
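
The ARG/CMD writes above are the tail end of command submission; the CMD word packs the opcode with a response-type code and the data-path control bits. A rough sketch of how such a word is assembled; the bit names (DP_SELECT, MSBS, BCE, DDIR, DMAE) are the driver's conventional defines and do not appear in the matched lines, so treat the exact encoding as an assumption:

    u32 cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);

    if (data) {
            cmdreg |= DP_SELECT | MSBS | BCE;       /* data present, multi-block, block count */
            if (data->flags & MMC_DATA_READ)
                    cmdreg |= DDIR;                 /* card-to-host transfer */
    }
    if (host->use_dma)
            cmdreg |= DMAE;                         /* hand the data phase to the DMA engine */

    OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
    OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
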
883 omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
891 static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
894 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
897 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
902 spin_lock_irqsave(&host->irq_lock, flags);
903 host->req_in_progress = 0;
904 dma_ch = host->dma_ch;
905 spin_unlock_irqrestore(&host->irq_lock, flags);
907 omap_hsmmc_disable_irq(host);
909 if (mrq->data && host->use_dma && dma_ch != -1)
911 host->mrq = NULL;
912 mmc_request_done(host->mmc, mrq);
919 omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
922 struct mmc_request *mrq = host->mrq;
925 if (host->cmd && host->cmd->opcode == 6 &&
926 host->response_busy) {
927 host->response_busy = 0;
931 omap_hsmmc_request_done(host, mrq);
935 host->data = NULL;
942 if (data->stop && (data->error || !host->mrq->sbc))
943 omap_hsmmc_start_command(host, data->stop, NULL);
945 omap_hsmmc_request_done(host, data->mrq);
952 omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
954 if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
955 !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
956 host->cmd = NULL;
957 omap_hsmmc_start_dma_transfer(host);
958 omap_hsmmc_start_command(host, host->mrq->cmd,
959 host->mrq->data);
963 host->cmd = NULL;
968 cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
969 cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
970 cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
971 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
974 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
977 if ((host->data == NULL && !host->response_busy) || cmd->error)
978 omap_hsmmc_request_done(host, host->mrq);
984 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
989 host->data->error = errno;
991 spin_lock_irqsave(&host->irq_lock, flags);
992 dma_ch = host->dma_ch;
993 host->dma_ch = -1;
994 spin_unlock_irqrestore(&host->irq_lock, flags);
996 if (host->use_dma && dma_ch != -1) {
997 struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
1001 host->data->sg, host->data->sg_len,
1002 omap_hsmmc_get_dma_dir(host, host->data));
1004 host->data->host_cookie = 0;
1006 host->data = NULL;
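
omap_hsmmc_dma_cleanup() clears host->dma_ch under irq_lock before touching the channel, so a racing completion cannot tear the same transfer down twice; only then is the descriptor aborted and the scatterlist unmapped. A sketch of the teardown half, using the standard dmaengine calls (only the terminate and unmap lines are additions to what is matched above):

    if (host->use_dma && dma_ch != -1) {
            struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);

            dmaengine_terminate_all(chan);          /* abort the in-flight descriptor */
            dma_unmap_sg(chan->device->dev,
                         host->data->sg, host->data->sg_len,
                         omap_hsmmc_get_dma_dir(host, host->data));
            host->data->host_cookie = 0;
    }
    host->data = NULL;
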
1013 static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
1035 dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
1038 static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
1051 static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
1057 OMAP_HSMMC_WRITE(host->base, SYSCTL,
1058 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);
1064 if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) {
1065 while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
1071 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
1075 if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
1076 dev_err(mmc_dev(host->mmc),
1081 static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
1085 omap_hsmmc_reset_controller_fsm(host, SRC);
1086 if (host->cmd)
1087 host->cmd->error = err;
1090 if (host->data) {
1091 omap_hsmmc_reset_controller_fsm(host, SRD);
1092 omap_hsmmc_dma_cleanup(host, err);
1093 } else if (host->mrq && host->mrq->cmd)
1094 host->mrq->cmd->error = err;
1097 static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1103 data = host->data;
1104 dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
1107 omap_hsmmc_dbg_report_irq(host, status);
1112 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
1114 hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
1118 ac12 = OMAP_HSMMC_READ(host->base, AC12);
1119 if (!(ac12 & ACNE) && host->mrq->sbc) {
1125 host->mrq->sbc->error = error;
1126 hsmmc_command_incomplete(host, error, end_cmd);
1128 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
1130 if (host->data || host->response_busy) {
1132 host->response_busy = 0;
1136 OMAP_HSMMC_WRITE(host->base, STAT, status);
1137 if (end_cmd || ((status & CC_EN) && host->cmd))
1138 omap_hsmmc_cmd_done(host, host->cmd);
1139 if ((end_trans || (status & TC_EN)) && host->mrq)
1140 omap_hsmmc_xfer_done(host, data);
1148 struct omap_hsmmc_host *host = dev_id;
1151 status = OMAP_HSMMC_READ(host->base, STAT);
1153 if (host->req_in_progress)
1154 omap_hsmmc_do_irq(host, status);
1157 mmc_signal_sdio_irq(host->mmc);
1160 status = OMAP_HSMMC_READ(host->base, STAT);
1168 struct omap_hsmmc_host *host = dev_id;
1171 spin_lock(&host->irq_lock);
1172 if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
1173 disable_irq_nosync(host->wake_irq);
1174 host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
1176 spin_unlock(&host->irq_lock);
1177 pm_request_resume(host->dev); /* no use counter */
1182 static void set_sd_bus_power(struct omap_hsmmc_host *host)
1186 OMAP_HSMMC_WRITE(host->base, HCTL,
1187 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
1189 if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
1202 static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1208 pm_runtime_put_sync(host->dev);
1209 if (host->dbclk)
1210 clk_disable_unprepare(host->dbclk);
1213 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
1217 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
1219 pm_runtime_get_sync(host->dev);
1220 if (host->dbclk)
1221 clk_prepare_enable(host->dbclk);
1226 OMAP_HSMMC_WRITE(host->base, HCTL,
1227 OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
1228 reg_val = OMAP_HSMMC_READ(host->base, HCTL);
1250 OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
1251 set_sd_bus_power(host);
1255 dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
1260 static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
1262 if (!mmc_slot(host).get_cover_state)
1265 host->reqs_blocked = 0;
1266 if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) {
1267 if (host->protect_card) {
1268 dev_info(host->dev, "%s: cover is closed, "
1270 mmc_hostname(host->mmc));
1271 host->protect_card = 0;
1274 if (!host->protect_card) {
1275 dev_info(host->dev, "%s: cover is open, "
1277 mmc_hostname(host->mmc));
1278 host->protect_card = 1;
1288 struct omap_hsmmc_host *host = dev_id;
1289 struct omap_mmc_slot_data *slot = &mmc_slot(host);
1292 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
1295 carddetect = slot->card_detect(host->dev, host->slot_id);
1297 omap_hsmmc_protect_card(host);
1302 mmc_detect_change(host->mmc, (HZ * 200) / 1000);
1304 mmc_detect_change(host->mmc, (HZ * 50) / 1000);
1310 struct omap_hsmmc_host *host = param;
1315 spin_lock_irq(&host->irq_lock);
1316 if (host->dma_ch < 0) {
1317 spin_unlock_irq(&host->irq_lock);
1321 data = host->mrq->data;
1322 chan = omap_hsmmc_get_dma_chan(host, data);
1326 omap_hsmmc_get_dma_dir(host, data));
1328 req_in_progress = host->req_in_progress;
1329 host->dma_ch = -1;
1330 spin_unlock_irq(&host->irq_lock);
1334 struct mmc_request *mrq = host->mrq;
1336 host->mrq = NULL;
1337 mmc_request_done(host->mmc, mrq);
1341 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
1349 data->host_cookie != host->next_data.cookie) {
1350 dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
1351 " host->next_data.cookie %d\n",
1352 __func__, data->host_cookie, host->next_data.cookie);
1357 if (next || data->host_cookie != host->next_data.cookie) {
1359 omap_hsmmc_get_dma_dir(host, data));
1362 dma_len = host->next_data.dma_len;
1363 host->next_data.dma_len = 0;
1374 host->dma_len = dma_len;
1382 static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
1405 BUG_ON(host->dma_ch != -1);
1407 chan = omap_hsmmc_get_dma_chan(host, data);
1409 cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
1410 cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
1420 ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
1428 dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
1434 tx->callback_param = host;
1439 host->dma_ch = 1;
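
omap_hsmmc_setup_dma_transfer() is a standard dmaengine slave setup: point both slave addresses at the controller's DATA register, map the scatterlist via omap_hsmmc_pre_dma_transfer(), prepare a slave_sg descriptor, attach the completion callback, and submit; the descriptor is only issued when the transfer actually starts. A hedged sketch of that sequence (burst sizes and prep flags are assumptions, and the callback name is assumed for the completion handler whose body appears earlier in these matches):

    struct dma_slave_config cfg = {
            .src_addr       = host->mapbase + OMAP_HSMMC_DATA,
            .dst_addr       = host->mapbase + OMAP_HSMMC_DATA,
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .src_maxburst   = data->blksz / 4,
            .dst_maxburst   = data->blksz / 4,
    };
    struct dma_async_tx_descriptor *tx;
    int ret;

    ret = dmaengine_slave_config(chan, &cfg);
    if (ret)
            return ret;

    tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
                    data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!tx)
            return -EINVAL;                         /* the "prep_slave_sg() failed" path above */

    tx->callback = omap_hsmmc_dma_callback;         /* assumed name for the completion handler */
    tx->callback_param = host;
    dmaengine_submit(tx);
    host->dma_ch = 1;                               /* mark a DMA transfer as set up */
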
1444 static void set_data_timeout(struct omap_hsmmc_host *host,
1451 reg = OMAP_HSMMC_READ(host->base, SYSCTL);
1456 cycle_ns = 1000000000 / (host->clk_rate / clkd);
1478 OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
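
set_data_timeout() turns the request's timeout_ns budget into the SYSCTL DTO field: the cycle_ns line above gives the period of the timeout clock for the current divider, and DTO then selects a counter of 2^(13+DTO) such cycles (values 0 through 14 are valid). An illustrative conversion, simplified from whatever exact rounding the driver uses, with the field macros assumed:

    unsigned int clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;   /* assumed field macros */
    unsigned long long cycle_ns, cycles;
    unsigned int dto = 0;

    if (clkd == 0)
            clkd = 1;
    cycle_ns = 1000000000ULL / (host->clk_rate / clkd);
    cycles = div64_u64(timeout_ns, cycle_ns) + timeout_clks;

    while (dto < 14 && (1ULL << (13 + dto)) < cycles)
            dto++;                                  /* smallest exponent that covers the budget */

    reg &= ~DTO_MASK;
    reg |= dto << DTO_SHIFT;
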
1481 static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
1483 struct mmc_request *req = host->mrq;
1488 OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
1490 set_data_timeout(host, req->data->timeout_ns,
1492 chan = omap_hsmmc_get_dma_chan(host, req->data);
1500 omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
1503 host->data = req->data;
1506 OMAP_HSMMC_WRITE(host->base, BLK, 0);
1512 set_data_timeout(host, 100000000U, 0);
1516 if (host->use_dma) {
1517 ret = omap_hsmmc_setup_dma_transfer(host, req);
1519 dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
1529 struct omap_hsmmc_host *host = mmc_priv(mmc);
1532 if (host->use_dma && data->host_cookie) {
1533 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
1536 omap_hsmmc_get_dma_dir(host, data));
1544 struct omap_hsmmc_host *host = mmc_priv(mmc);
1551 if (host->use_dma) {
1552 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
1554 if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
1555 &host->next_data, c))
1565 struct omap_hsmmc_host *host = mmc_priv(mmc);
1568 BUG_ON(host->req_in_progress);
1569 BUG_ON(host->dma_ch != -1);
1570 if (host->protect_card) {
1571 if (host->reqs_blocked < 3) {
1577 omap_hsmmc_reset_controller_fsm(host, SRD);
1578 omap_hsmmc_reset_controller_fsm(host, SRC);
1579 host->reqs_blocked += 1;
1587 } else if (host->reqs_blocked)
1588 host->reqs_blocked = 0;
1589 WARN_ON(host->mrq != NULL);
1590 host->mrq = req;
1591 host->clk_rate = clk_get_rate(host->fclk);
1592 err = omap_hsmmc_prepare_data(host, req);
1597 host->mrq = NULL;
1601 if (req->sbc && !(host->flags & AUTO_CMD23)) {
1602 omap_hsmmc_start_command(host, req->sbc, NULL);
1606 omap_hsmmc_start_dma_transfer(host);
1607 omap_hsmmc_start_command(host, req->cmd, req->data);
1613 struct omap_hsmmc_host *host = mmc_priv(mmc);
1616 pm_runtime_get_sync(host->dev);
1618 if (ios->power_mode != host->power_mode) {
1621 mmc_slot(host).set_power(host->dev, host->slot_id,
1625 mmc_slot(host).set_power(host->dev, host->slot_id,
1632 host->power_mode = ios->power_mode;
1637 omap_hsmmc_set_bus_width(host);
1639 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1643 if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
1651 if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
1652 dev_dbg(mmc_dev(host->mmc),
1657 omap_hsmmc_set_clock(host);
1660 send_init_stream(host);
1662 omap_hsmmc_set_bus_mode(host);
1664 pm_runtime_put_autosuspend(host->dev);
1669 struct omap_hsmmc_host *host = mmc_priv(mmc);
1671 if (!mmc_slot(host).card_detect)
1673 return mmc_slot(host).card_detect(host->dev, host->slot_id);
1678 struct omap_hsmmc_host *host = mmc_priv(mmc);
1680 if (!mmc_slot(host).get_ro)
1682 return mmc_slot(host).get_ro(host->dev, 0);
1687 struct omap_hsmmc_host *host = mmc_priv(mmc);
1689 if (mmc_slot(host).init_card)
1690 mmc_slot(host).init_card(card);
1695 struct omap_hsmmc_host *host = mmc_priv(mmc);
1699 spin_lock_irqsave(&host->irq_lock, flags);
1701 con = OMAP_HSMMC_READ(host->base, CON);
1702 irq_mask = OMAP_HSMMC_READ(host->base, ISE);
1704 host->flags |= HSMMC_SDIO_IRQ_ENABLED;
1708 host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
1712 OMAP_HSMMC_WRITE(host->base, CON, con);
1713 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
1719 if (!host->req_in_progress || !enable)
1720 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
1723 OMAP_HSMMC_READ(host->base, IE);
1725 spin_unlock_irqrestore(&host->irq_lock, flags);
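
Note how the enable path updates IE unconditionally but defers the ISE write while a request is in progress, so a freshly enabled SDIO card interrupt cannot fire in the middle of a command/data sequence; the pending ISE update is applied once the request completes. The CON read-modify-write exists because the card interrupt on DAT1 also needs the clock-related bits kept on; a sketch of the flag handling, with CTPL/CLKEXTFREE assumed as the usual bit names since they are not among the matched lines:

    if (enable) {
            host->flags |= HSMMC_SDIO_IRQ_ENABLED;
            irq_mask |= CIRQ_EN;                    /* card interrupt enable */
            con |= CTPL | CLKEXTFREE;               /* keep DAT1 usable as an IRQ line */
    } else {
            host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
            irq_mask &= ~CIRQ_EN;
            con &= ~(CTPL | CLKEXTFREE);
    }
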
1728 static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
1730 struct mmc_host *mmc = host->mmc;
1739 if (!host->dev->of_node || !host->wake_irq)
1743 irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
1744 ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
1746 mmc_hostname(mmc), host);
1748 dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
1756 if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
1757 struct pinctrl *p = devm_pinctrl_get(host->dev);
1763 dev_info(host->dev, "missing default pinctrl state\n");
1770 dev_info(host->dev, "missing idle pinctrl state\n");
1778 OMAP_HSMMC_WRITE(host->base, HCTL,
1779 OMAP_HSMMC_READ(host->base, HCTL) | IWE);
1783 devm_free_irq(host->dev, host->wake_irq, host);
1785 dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
1786 host->wake_irq = 0;
1790 static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1795 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1803 value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
1804 OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);
1806 value = OMAP_HSMMC_READ(host->base, CAPA);
1807 OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);
1810 set_sd_bus_power(host);
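
omap_hsmmc_conf_bus_power() chooses the SDVS start voltage and the CAPA voltage-support bits from the dual-voltage controller flag before switching the bus power bit on through set_sd_bus_power(). A sketch of the selection, assuming the usual SDVS30/SDVS18 and VS30/VS18 defines:

    u32 hctl, capa;

    if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
            hctl = SDVS30;                          /* dual-volt instance: start at 3.0 V */
            capa = VS30 | VS18;
    } else {
            hctl = SDVS18;
            capa = VS18;
    }
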
1815 struct omap_hsmmc_host *host = mmc_priv(mmc);
1817 pm_runtime_get_sync(host->dev);
1824 struct omap_hsmmc_host *host = mmc_priv(mmc);
1826 pm_runtime_mark_last_busy(host->dev);
1827 pm_runtime_put_autosuspend(host->dev);
1860 struct omap_hsmmc_host *host = mmc_priv(mmc);
1868 (host->flags & HSMMC_SDIO_IRQ_ENABLED) ? "enabled"
1871 seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);
1873 pm_runtime_get_sync(host->dev);
1876 OMAP_HSMMC_READ(host->base, CON));
1878 OMAP_HSMMC_READ(host->base, PSTATE));
1880 OMAP_HSMMC_READ(host->base, HCTL));
1882 OMAP_HSMMC_READ(host->base, SYSCTL));
1884 OMAP_HSMMC_READ(host->base, IE));
1886 OMAP_HSMMC_READ(host->base, ISE));
1888 OMAP_HSMMC_READ(host->base, CAPA));
1890 pm_runtime_mark_last_busy(host->dev);
1891 pm_runtime_put_autosuspend(host->dev);
2023 struct omap_hsmmc_host *host = NULL;
2075 host = mmc_priv(mmc);
2076 host->mmc = mmc;
2077 host->pdata = pdata;
2078 host->dev = &pdev->dev;
2079 host->use_dma = 1;
2080 host->dma_ch = -1;
2081 host->irq = irq;
2082 host->slot_id = 0;
2083 host->mapbase = res->start + pdata->reg_offset;
2084 host->base = base + pdata->reg_offset;
2085 host->power_mode = MMC_POWER_OFF;
2086 host->next_data.cookie = 1;
2087 host->pbias_enabled = 0;
2089 platform_set_drvdata(pdev, host);
2092 host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);
2103 spin_lock_init(&host->irq_lock);
2105 host->fclk = devm_clk_get(&pdev->dev, "fck");
2106 if (IS_ERR(host->fclk)) {
2107 ret = PTR_ERR(host->fclk);
2108 host->fclk = NULL;
2112 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
2117 pm_runtime_enable(host->dev);
2118 pm_runtime_get_sync(host->dev);
2119 pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
2120 pm_runtime_use_autosuspend(host->dev);
2122 omap_hsmmc_context_save(host);
2124 host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
2128 if (IS_ERR(host->dbclk)) {
2129 host->dbclk = NULL;
2130 } else if (clk_prepare_enable(host->dbclk) != 0) {
2131 dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
2132 host->dbclk = NULL;
2147 mmc->caps |= mmc_slot(host).caps;
2151 if (mmc_slot(host).nonremovable)
2154 mmc->pm_caps = mmc_slot(host).pm_caps;
2156 omap_hsmmc_conf_bus_power(host);
2161 dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
2169 dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
2179 host->rx_chan =
2183 if (!host->rx_chan) {
2184 dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
2189 host->tx_chan =
2193 if (!host->tx_chan) {
2194 dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
2200 ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
2201 mmc_hostname(mmc), host);
2203 dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
2209 dev_err(mmc_dev(host->mmc),
2215 if (omap_hsmmc_have_reg() && !mmc_slot(host).set_power) {
2216 ret = omap_hsmmc_reg_get(host);
2219 host->use_reg = 1;
2222 mmc->ocr_avail = mmc_slot(host).ocr_mask;
2225 if ((mmc_slot(host).card_detect_irq)) {
2227 mmc_slot(host).card_detect_irq,
2230 mmc_hostname(mmc), host);
2232 dev_err(mmc_dev(host->mmc),
2240 omap_hsmmc_disable_irq(host);
2250 ret = omap_hsmmc_configure_wake_irq(host);
2254 omap_hsmmc_protect_card(host);
2258 if (mmc_slot(host).name != NULL) {
2263 if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) {
2271 pm_runtime_mark_last_busy(host->dev);
2272 pm_runtime_put_autosuspend(host->dev);
2279 if (host->use_reg)
2280 omap_hsmmc_reg_put(host);
2282 if (host->pdata->cleanup)
2283 host->pdata->cleanup(&pdev->dev);
2285 if (host->tx_chan)
2286 dma_release_channel(host->tx_chan);
2287 if (host->rx_chan)
2288 dma_release_channel(host->rx_chan);
2289 pm_runtime_put_sync(host->dev);
2290 pm_runtime_disable(host->dev);
2291 if (host->dbclk)
2292 clk_disable_unprepare(host->dbclk);
2303 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2305 pm_runtime_get_sync(host->dev);
2306 mmc_remove_host(host->mmc);
2307 if (host->use_reg)
2308 omap_hsmmc_reg_put(host);
2309 if (host->pdata->cleanup)
2310 host->pdata->cleanup(&pdev->dev);
2312 if (host->tx_chan)
2313 dma_release_channel(host->tx_chan);
2314 if (host->rx_chan)
2315 dma_release_channel(host->rx_chan);
2317 pm_runtime_put_sync(host->dev);
2318 pm_runtime_disable(host->dev);
2319 if (host->dbclk)
2320 clk_disable_unprepare(host->dbclk);
2322 omap_hsmmc_gpio_free(host->pdata);
2323 mmc_free_host(host->mmc);
2331 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2333 if (host->pdata->suspend)
2334 return host->pdata->suspend(dev, host->slot_id);
2341 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2343 if (host->pdata->resume)
2344 host->pdata->resume(dev, host->slot_id);
2350 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2352 if (!host)
2355 pm_runtime_get_sync(host->dev);
2357 if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
2358 OMAP_HSMMC_WRITE(host->base, ISE, 0);
2359 OMAP_HSMMC_WRITE(host->base, IE, 0);
2360 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
2361 OMAP_HSMMC_WRITE(host->base, HCTL,
2362 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2366 if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
2367 !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
2368 disable_irq(host->wake_irq);
2370 if (host->dbclk)
2371 clk_disable_unprepare(host->dbclk);
2373 pm_runtime_put_sync(host->dev);
2380 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2382 if (!host)
2385 pm_runtime_get_sync(host->dev);
2387 if (host->dbclk)
2388 clk_prepare_enable(host->dbclk);
2390 if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
2391 omap_hsmmc_conf_bus_power(host);
2393 omap_hsmmc_protect_card(host);
2395 if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
2396 !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
2397 enable_irq(host->wake_irq);
2399 pm_runtime_mark_last_busy(host->dev);
2400 pm_runtime_put_autosuspend(host->dev);
2413 struct omap_hsmmc_host *host;
2417 host = platform_get_drvdata(to_platform_device(dev));
2418 omap_hsmmc_context_save(host);
2421 spin_lock_irqsave(&host->irq_lock, flags);
2422 if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
2423 (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
2425 OMAP_HSMMC_WRITE(host->base, ISE, 0);
2426 OMAP_HSMMC_WRITE(host->base, IE, 0);
2428 if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
2435 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
2436 OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
2437 OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
2445 WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
2446 enable_irq(host->wake_irq);
2447 host->flags |= HSMMC_WAKE_IRQ_ENABLED;
2453 spin_unlock_irqrestore(&host->irq_lock, flags);
2459 struct omap_hsmmc_host *host;
2462 host = platform_get_drvdata(to_platform_device(dev));
2463 omap_hsmmc_context_restore(host);
2466 spin_lock_irqsave(&host->irq_lock, flags);
2467 if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
2468 (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
2470 if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
2471 disable_irq_nosync(host->wake_irq);
2472 host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
2475 pinctrl_pm_select_default_state(host->dev);
2478 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
2479 OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
2480 OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
2482 pinctrl_pm_select_default_state(host->dev);
2484 spin_unlock_irqrestore(&host->irq_lock, flags);
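
The PM callbacks whose bodies close out this listing (the pdata suspend/resume pass-throughs, system suspend/resume, and the runtime pair that save and restore the register context) are hooked up through a dev_pm_ops table on the platform_driver. A minimal sketch of that wiring; the callback and match-table names are conventional assumptions, not taken from the matched lines:

    static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
            .suspend         = omap_hsmmc_suspend,
            .resume          = omap_hsmmc_resume,
            .runtime_suspend = omap_hsmmc_runtime_suspend,
            .runtime_resume  = omap_hsmmc_runtime_resume,
    };

    static struct platform_driver omap_hsmmc_driver = {
            .probe  = omap_hsmmc_probe,
            .remove = omap_hsmmc_remove,
            .driver = {
                    .name           = "omap_hsmmc",
                    .pm             = &omap_hsmmc_dev_pm_ops,
                    .of_match_table = of_match_ptr(omap_mmc_of_match),
            },
    };
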