core.c revision 71d7d3d190fe77588269a8febf93cd739bd91eb3

/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	led_trigger_event(host->led, LED_FULL);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

	wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete.  Return any error that occurred while the command
 * was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq;

	WARN_ON(!host->claimed);

	memset(&mrq, 0, sizeof(struct mmc_request));

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
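
/*
 * Usage sketch (added for illustration, not part of the original file):
 * a typical caller fills in a struct mmc_command and lets
 * mmc_wait_for_cmd() wrap it in a one-command request.  This mirrors
 * the SEND_STATUS sequence used by mmc_do_erase() below; the card and
 * err variables are assumed to exist in the caller, and the final
 * argument allows up to 3 retries.
 *
 *	struct mmc_command cmd;
 *
 *	memset(&cmd, 0, sizeof(struct mmc_command));
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 */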

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		timeout_us += data->timeout_clks * 1000 /
			(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}
	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
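
/*
 * Worked example (added for illustration, numbers assumed): for an SD
 * card read with tacc_ns = 1500000 (1.5 ms) and tacc_clks = 0, the SD
 * multiplier of 100 gives timeout_ns = 150000000 (150 ms).  That
 * exceeds the 100 ms read limit, so the code above clamps the result
 * to timeout_ns = 100000000 with timeout_clks = 0, exactly as it
 * always does for block-addressed (SDHC) cards.
 */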

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
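
/*
 * Illustrative note (added): the rounding above is a plain round-up to
 * a multiple of 4, so mmc_align_data_size(card, 13) returns 16 while
 * mmc_align_data_size(card, 16) returns 16 unchanged.  A caller
 * building a single-entry scatterlist might use it like this (buffer
 * handling assumed):
 *
 *	len = mmc_align_data_size(card, len);
 *	sg_init_one(&sg, buf, len);
 */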

/**
 * mmc_host_enable - enable a host.
 * @host: mmc host to enable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (host->nesting_cnt++)
		return 0;

	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}

/**
 * mmc_host_disable - disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	err = mmc_host_do_disable(host, 0);
	return err;
}
EXPORT_SYMBOL(mmc_host_disable);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations.  If @abort is non null and
 * dereferences to a non-zero value then this will return prematurely
 * with that non-zero value without acquiring the lock.  Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
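
/*
 * Usage sketch (added for illustration): bus operations bracket card
 * access with a claim/release pair; mmc_claim_host() is the usual
 * NULL-@abort convenience wrapper around __mmc_claim_host(), and
 * do_something_with_card() is a hypothetical helper.
 *
 *	mmc_claim_host(card->host);
 *	err = do_something_with_card(card);
 *	mmc_release_host(card->host);
 */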

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

static void mmc_do_release_host(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}

void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		return;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);
}

/**
 * mmc_host_lazy_disable - lazily disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * does not exceed "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
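
/*
 * Worked example (added for illustration): 3300 mV sits on the boundary
 * between the MMC_VDD_32_33 and MMC_VDD_33_34 OCR bits.  With
 * @low_bits = true the code subtracts 1 mV first, so
 * bit = (3299 - 2000) / 100 + 8 = 20 (MMC_VDD_32_33); with
 * @low_bits = false, bit = (3300 - 2000) / 100 + 8 = 21 (MMC_VDD_33_34).
 * mmc_vddrange_to_ocrmask() below relies on exactly this to make both
 * boundary voltages of a range inclusive.
 */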

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
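
/*
 * Usage sketch (added for illustration): a host driver with a "vmmc"
 * supply might derive its OCR mask at probe time like this; the names
 * are assumed, not taken from a real driver.
 *
 *	supply = regulator_get(dev, "vmmc");
 *	if (IS_ERR(supply))
 *		return PTR_ERR(supply);
 *	ret = mmc_regulator_get_ocrmask(supply);
 *	if (ret > 0)
 *		mmc->ocr_avail = ret;
 */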

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 * @supply: regulator to use
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;
	int enabled;

	enabled = regulator_is_enabled(supply);
	if (enabled < 0)
		return enabled;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !enabled)
			result = regulator_enable(supply);
	} else if (enabled) {
		result = regulator_disable(supply);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
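
/*
 * Usage sketch (added for illustration): inside a host driver's
 * set_ios() method the regulator helper pairs naturally with ios->vdd;
 * the my_host structure and its vmmc field are assumed.
 *
 *	static void my_host_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct my_host *host = mmc_priv(mmc);
 *
 *		mmc_regulator_set_ocr(host->vmmc, ios->vdd);
 *		...
 *	}
 */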

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	if (host->f_min > 400000) {
		pr_warning("%s: Minimum clock frequency too high for "
				"identification mode\n", mmc_hostname(host));
		host->ios.clock = host->f_min;
	} else
		host->ios.clock = 400000;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
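
/*
 * Usage sketch (added for illustration): a host driver with a
 * card-detect GPIO typically calls mmc_detect_change() from its
 * interrupt handler, with a small delay so the contacts can stop
 * bouncing; the handler name and the 200 ms debounce value are assumed.
 *
 *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */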

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated as the
	 * erase size grows.  Consequently, 'pref_erase' is defined as a
	 * guide to limit erases to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}
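
/*
 * Worked example (added for illustration, numbers assumed): for a card
 * that reports neither an SD Allocation Unit nor an MMC High Capacity
 * Erase Size, the fallback above works in 512-byte sectors.  sz is the
 * capacity in MiB (sectors >> 11), so a 2 GiB card gives
 * sz = 2048 >= 1024 and pref_erase = 4 * 1024 * 1024 / 512 = 8192
 * sectors (4 MiB), rounded up afterwards to a multiple of erase_size
 * if necessary.
 */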

static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
				      struct mmc_command *cmd,
				      unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	cmd->erase_timeout = erase_timeout;
}

static void mmc_set_sd_erase_timeout(struct mmc_card *card,
				     struct mmc_command *cmd, unsigned int arg,
				     unsigned int qty)
{
	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		cmd->erase_timeout = card->ssr.erase_timeout * qty +
				     card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		cmd->erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (cmd->erase_timeout < 1000)
		cmd->erase_timeout = 1000;
}

static void mmc_set_erase_timeout(struct mmc_card *card,
				  struct mmc_command *cmd, unsigned int arg,
				  unsigned int qty)
{
	if (mmc_card_sd(card))
		mmc_set_sd_erase_timeout(card, cmd, arg, qty);
	else
		mmc_set_mmc_erase_timeout(card, cmd, arg, qty);
}
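
/*
 * Worked example (added for illustration, values assumed): an SD card
 * whose SSR reports erase_timeout = 250 ms per allocation unit and
 * erase_offset = 100 ms, erasing qty = 4 allocation units, gets
 * cmd->erase_timeout = 250 * 4 + 100 = 1100 ms.  A card that reports
 * nothing falls back to 250 ms per write block, with the 1000 ms floor
 * applied either way.
 */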

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd;
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	mmc_set_erase_timeout(card, &cmd, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			printk(KERN_ERR "error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == 7);
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
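
/*
 * Usage sketch (added for illustration): a block-layer caller first
 * checks capability and alignment, then erases with the host claimed;
 * the from and nr values are assumed.
 *
 *	if (mmc_can_erase(card) &&
 *	    mmc_erase_group_aligned(card, from, nr)) {
 *		mmc_claim_host(card->host);
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *		mmc_release_host(card->host);
 *	}
 */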

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr)) {
			mmc_claim_host(host);
			/* try SDMEM (but not MMC) even if SDIO is broken */
			if (mmc_send_app_op_cond(host, 0, &ocr))
				goto out_fail;

			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
		}
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		goto out;
	}

out_fail:
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

void mmc_power_save_host(struct mmc_host *host)
{
	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return;
	}

	if (host->bus_ops->power_save)
		host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);
}
EXPORT_SYMBOL(mmc_power_save_host);

void mmc_power_restore_host(struct mmc_host *host)
{
	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return;
	}

	mmc_power_up(host);
	host->bus_ops->power_restore(host);

	mmc_bus_put(host);
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	/* Check the sleep handler (the original checked ->awake here) */
	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);
		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER))
		mmc_power_off(host);

	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
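
/*
 * Usage sketch (added for illustration): a platform host driver
 * forwards its own PM callbacks to the core; the my_driver_suspend()
 * and my_driver_resume() wrappers and the my_host structure are
 * assumed.
 *
 *	static int my_driver_suspend(struct device *dev)
 *	{
 *		struct my_host *host = dev_get_drvdata(dev);
 *
 *		return mmc_suspend_host(host->mmc);
 *	}
 *
 *	static int my_driver_resume(struct device *dev)
 *	{
 *		struct my_host *host = dev_get_drvdata(dev);
 *
 *		return mmc_resume_host(host->mmc);
 *	}
 */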

/*
 * Do the card removal on suspend if card is assumed removable.
 * Do that in a pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = create_singlethread_workqueue("kmmcd");
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");