/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/wakelock.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

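/*
 * Note (illustrative, not from this file): CONFIG_FAIL_MMC_REQUEST hooks
 * into the generic fault-injection framework, so the error injection
 * below is normally tuned at runtime via that framework's debugfs
 * attributes (e.g. probability and times for fail_mmc_request).
 */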
/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			 "tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
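		/*
		 * Debug-only sanity check: the scatterlist entries must
		 * add up to exactly blocks * blksz bytes.
		 */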
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *	that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}

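/*
 * Illustrative usage (not code from this file): a block driver typically
 * keeps the host busy by handing the next prepared request to
 * mmc_start_req() while reaping the previous one, roughly:
 *
 *	prev = mmc_start_req(host, &next->areq, &err);
 *	if (prev)
 *		finish_block_request(prev);
 *
 * (finish_block_request() is a hypothetical helper.)  This way data
 * preparation, e.g. DMA mapping, overlaps the ongoing transfer.
 */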
/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter; returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request before starting the new one, then return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL if none completed.
 * Waits for an ongoing request (previously started) to complete and
 * returns it. If there is no ongoing request, NULL is returned
 * without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
	}

	if (!err && areq)
		start_err = __mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt, then keeps checking the card
 * status until the card is out of the programming (prg) state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/*
	 * If the card status is in PRG-state, we can send the HPI command.
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
		do {
			/*
			 * We don't know when the HPI command will finish
			 * processing, so we need to resend HPI until out
			 * of prg-state, and keep checking the card status
			 * with SEND_STATUS. If a timeout error occurs when
			 * sending the HPI command, we are already out of
			 * prg-state.
			 */
			err = mmc_send_hpi_cmd(card, &status);
			if (err)
				pr_debug("%s: abort HPI (%d error)\n",
					 mmc_hostname(card->host), err);

			err = mmc_send_status(card, &status);
			if (err)
				break;
		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
	} else
		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased for other problematic cards.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

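/*
 * Worked example for mmc_set_data_timeout() (illustrative CSD numbers):
 * an SD write with TAAC = 1.5625 ms, NSAC = 0 and R2W_FACTOR = 5 gives
 * mult = 100 << 5 = 3200, so timeout_ns = 1.5625 ms * 3200 = 5 s.  That
 * exceeds the 3 s write limit above, so the timeout is clamped to 3 s.
 */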
/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, then this will return prematurely
 * with that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

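/*
 * Typical claim/release pattern (as used by mmc_interrupt_hpi() above):
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 *
 * The claim nests for the same task via host->claim_cnt.
 */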
/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		 "width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

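/*
 * Worked example: mmc_vddrange_to_ocrmask(3300, 3400) maps vdd_max = 3400
 * (high bits preferred) to bit 22 = ilog2(MMC_VDD_34_35) and vdd_min = 3300
 * (low bits preferred) to bit 20 = ilog2(MMC_VDD_32_33), so the returned
 * mask is MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */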
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			  struct regulator *supply,
			  unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);

		if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
			   mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

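/*
 * Note: CMD11 (SD_SWITCH_VOLTAGE) below is the SD-spec voltage switch
 * command used when moving UHS-I cards to 1.8 V signalling; the host
 * driver then completes the switch via its start_signal_voltage_switch
 * hook.
 */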
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

static void mmc_poweroff_notify(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int timeout;
	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
	int err = 0;

	card = host->card;
	mmc_claim_host(host);

	/*
	 * Send power notify command only if card
	 * is mmc and notify state is powered ON
	 */
	if (card && mmc_card_mmc(card) &&
	    (card->poweroff_notify_state == MMC_POWERED_ON)) {

		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
			notify_type = EXT_CSD_POWER_OFF_SHORT;
			timeout = card->ext_csd.generic_cmd6_time;
			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
		} else {
			notify_type = EXT_CSD_POWER_OFF_LONG;
			timeout = card->ext_csd.power_off_longtime;
			card->poweroff_notify_state = MMC_POWEROFF_LONG;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 notify_type, timeout);

		if (err && err != -EBADMSG)
			pr_err("Device failed to respond within %d ms poweroff "
			       "time. Forcefully powering down the device\n",
			       timeout);

		/* Set the card state to no notification after the poweroff */
		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
	}
	mmc_release_host(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	int err = 0;
	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * For eMMC 4.5 devices, send the AWAKE command before the
	 * POWER_OFF_NOTIFY command, because in the sleep state
	 * eMMC 4.5 devices respond only to RESET and AWAKE commands.
	 */
	if (host->card && mmc_card_is_sleep(host->card) &&
	    host->bus_ops->resume) {
		err = host->bus_ops->resume(host);

		if (!err)
			mmc_poweroff_notify(host);
		else
			pr_warning("%s: error %d during resume "
				   "(continue with poweroff sequence)\n",
				   mmc_hostname(host), err);
	}

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

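/*
 * Illustrative pattern: accesses to host->bus_ops are bracketed by
 * mmc_bus_get()/mmc_bus_put() so the ops cannot be released mid-use,
 * as mmc_rescan() does below:
 *
 *	mmc_bus_get(host);
 *	if (host->bus_ops && !host->bus_dead)
 *		host->bus_ops->detect(host);
 *	mmc_bus_put(host);
 */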
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

int mmc_resume_bus(struct mmc_host *host)
{
	unsigned long flags;

	if (!mmc_bus_needs_resume(host))
		return -EINVAL;

	pr_info("%s: Starting deferred resume\n", mmc_hostname(host));
	spin_lock_irqsave(&host->lock, flags);
	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
	host->rescan_disable = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		mmc_power_up(host);
		BUG_ON(!host->bus_ops->resume);
		host->bus_ops->resume(host);
	}

	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	mmc_bus_put(host);
	pr_info("%s: Deferred resume completed\n", mmc_hostname(host));
	return 0;
}

EXPORT_SYMBOL(mmc_resume_bus);

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;

	wake_lock(&host->detect_wake_lock);
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time. For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value. For modern cards it
	 * will end up being 4MiB. Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1 ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}

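/*
 * Worked example for the alignment logic in mmc_erase() below
 * (illustrative numbers): with erase_size = 4 sectors, from = 5 and
 * nr = 10, the unaligned head moves 'from' up to 8 and shrinks 'nr'
 * to 7, the unaligned tail shrinks 'nr' to 4, and sectors 8..11 are
 * erased; the partial erase groups at either end are silently skipped.
 */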
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}

int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);

static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/* Initialization should be done at 3.3 V I/O voltage. */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);

	/*
	 * sdio_reset sends CMD52 to reset card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);
	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

void mmc_rescan(struct work_struct *work)
{
	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;
	bool extend_wakelock = false;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond.
	 */
	if (host->bus_dead)
		extend_wakelock = true;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
void mmc_rescan(struct work_struct *work)
{
	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;
	bool extend_wakelock = false;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * If the card was removed, the bus will be marked as dead -
	 * extend the wakelock so userspace can respond.
	 */
	if (host->bus_dead)
		extend_wakelock = true;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
			extend_wakelock = true;
			break;
		}
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
	else
		wake_unlock(&host->detect_wake_lock);
	if (host->caps & MMC_CAP_NEEDS_POLL) {
		wake_lock(&host->detect_wake_lock);
		mmc_schedule_delayed_work(&host->detect, HZ);
	}
}

void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	if (cancel_delayed_work_sync(&host->detect))
		wake_unlock(&host->detect_wake_lock);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);
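/*
 * Sleep/Awake (CMD5) was introduced with the eMMC 4.3 specification,
 * which reports an EXT_CSD revision of 3 - hence the rev check below.
 */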
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    mmc_card_is_removable(host))
		return err;

	mmc_claim_host(host);
	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
				       mmc_hostname(card->host),
				       enable ? "on" : "off",
				       err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}
	mmc_release_host(host);

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);

#ifdef CONFIG_PM
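/*
 * Host suspend/resume.  Bus drivers that cannot suspend a card in
 * place (their ->suspend() returns -ENOSYS, or they lack ->resume())
 * have the card removed on suspend; it is redetected on resume.
 */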
/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (mmc_bus_needs_resume(host))
		return 0;

	if (cancel_delayed_work(&host->detect))
		wake_unlock(&host->detect_wake_lock);
	mmc_flush_scheduled_work();

	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);

		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.  (Calling
			 * bus_ops->remove() with a claimed host can
			 * deadlock.)
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_power_off(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (mmc_bus_manual_resume(host)) {
		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
		mmc_bus_put(host);
		return 0;
	}

	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD.
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
				   "(card was removed?)\n",
				   mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);

/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier, while userspace isn't yet frozen, so
 * we will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		if (mmc_bus_needs_resume(host)) {
			spin_unlock_irqrestore(&host->lock, flags);
			break;
		}
		host->rescan_disable = 1;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
		spin_unlock_irqrestore(&host->lock, flags);
		if (cancel_delayed_work_sync(&host->detect))
			wake_unlock(&host->detect_wake_lock);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		if (mmc_bus_manual_resume(host)) {
			spin_unlock_irqrestore(&host->lock, flags);
			break;
		}
		host->rescan_disable = 0;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);
	}

	return 0;
}
#endif

#ifdef CONFIG_MMC_EMBEDDED_SDIO
void mmc_set_embedded_sdio_data(struct mmc_host *host,
				struct sdio_cis *cis,
				struct sdio_cccr *cccr,
				struct sdio_embedded_func *funcs,
				int num_funcs)
{
	host->embedded_sdio_data.cis = cis;
	host->embedded_sdio_data.cccr = cccr;
	host->embedded_sdio_data.funcs = funcs;
	host->embedded_sdio_data.num_funcs = num_funcs;
}

EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");