core.c revision 807e8e40673d9628fa7dcdd14423424b4ee5f43b
1/* 2 * linux/drivers/mmc/core/core.c 3 * 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13#include <linux/module.h> 14#include <linux/init.h> 15#include <linux/interrupt.h> 16#include <linux/completion.h> 17#include <linux/device.h> 18#include <linux/delay.h> 19#include <linux/pagemap.h> 20#include <linux/err.h> 21#include <linux/leds.h> 22#include <linux/scatterlist.h> 23#include <linux/log2.h> 24#include <linux/regulator/consumer.h> 25#include <linux/pm_runtime.h> 26 27#include <linux/mmc/card.h> 28#include <linux/mmc/host.h> 29#include <linux/mmc/mmc.h> 30#include <linux/mmc/sd.h> 31 32#include "core.h" 33#include "bus.h" 34#include "host.h" 35#include "sdio_bus.h" 36 37#include "mmc_ops.h" 38#include "sd_ops.h" 39#include "sdio_ops.h" 40 41static struct workqueue_struct *workqueue; 42 43/* 44 * Enabling software CRCs on the data blocks can be a significant (30%) 45 * performance cost, and for other reasons may not always be desired. 46 * So we allow it it to be disabled. 47 */ 48int use_spi_crc = 1; 49module_param(use_spi_crc, bool, 0); 50 51/* 52 * We normally treat cards as removed during suspend if they are not 53 * known to be on a non-removable bus, to avoid the risk of writing 54 * back data to a different card after resume. Allow this to be 55 * overridden if necessary. 56 */ 57#ifdef CONFIG_MMC_UNSAFE_RESUME 58int mmc_assume_removable; 59#else 60int mmc_assume_removable = 1; 61#endif 62EXPORT_SYMBOL(mmc_assume_removable); 63module_param_named(removable, mmc_assume_removable, bool, 0644); 64MODULE_PARM_DESC( 65 removable, 66 "MMC/SD cards are removable and may be removed during suspend"); 67 68/* 69 * Internal function. Schedule delayed work in the MMC work queue. 70 */ 71static int mmc_schedule_delayed_work(struct delayed_work *work, 72 unsigned long delay) 73{ 74 return queue_delayed_work(workqueue, work, delay); 75} 76 77/* 78 * Internal function. Flush all scheduled work from the MMC work queue. 79 */ 80static void mmc_flush_scheduled_work(void) 81{ 82 flush_workqueue(workqueue); 83} 84 85/** 86 * mmc_request_done - finish processing an MMC request 87 * @host: MMC host which completed request 88 * @mrq: MMC request which request 89 * 90 * MMC drivers should call this function when they have completed 91 * their processing of a request. 92 */ 93void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) 94{ 95 struct mmc_command *cmd = mrq->cmd; 96 int err = cmd->error; 97 98 if (err && cmd->retries && mmc_host_is_spi(host)) { 99 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) 100 cmd->retries = 0; 101 } 102 103 if (err && cmd->retries) { 104 pr_debug("%s: req failed (CMD%u): %d, retrying...\n", 105 mmc_hostname(host), cmd->opcode, err); 106 107 cmd->retries--; 108 cmd->error = 0; 109 host->ops->request(host, mrq); 110 } else { 111 led_trigger_event(host->led, LED_OFF); 112 113 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", 114 mmc_hostname(host), cmd->opcode, err, 115 cmd->resp[0], cmd->resp[1], 116 cmd->resp[2], cmd->resp[3]); 117 118 if (mrq->data) { 119 pr_debug("%s: %d bytes transferred: %d\n", 120 mmc_hostname(host), 121 mrq->data->bytes_xfered, mrq->data->error); 122 } 123 124 if (mrq->stop) { 125 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n", 126 mmc_hostname(host), mrq->stop->opcode, 127 mrq->stop->error, 128 mrq->stop->resp[0], mrq->stop->resp[1], 129 mrq->stop->resp[2], mrq->stop->resp[3]); 130 } 131 132 if (mrq->done) 133 mrq->done(mrq); 134 135 mmc_host_clk_gate(host); 136 } 137} 138 139EXPORT_SYMBOL(mmc_request_done); 140 141static void 142mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) 143{ 144#ifdef CONFIG_MMC_DEBUG 145 unsigned int i, sz; 146 struct scatterlist *sg; 147#endif 148 149 pr_debug("%s: starting CMD%u arg %08x flags %08x\n", 150 mmc_hostname(host), mrq->cmd->opcode, 151 mrq->cmd->arg, mrq->cmd->flags); 152 153 if (mrq->data) { 154 pr_debug("%s: blksz %d blocks %d flags %08x " 155 "tsac %d ms nsac %d\n", 156 mmc_hostname(host), mrq->data->blksz, 157 mrq->data->blocks, mrq->data->flags, 158 mrq->data->timeout_ns / 1000000, 159 mrq->data->timeout_clks); 160 } 161 162 if (mrq->stop) { 163 pr_debug("%s: CMD%u arg %08x flags %08x\n", 164 mmc_hostname(host), mrq->stop->opcode, 165 mrq->stop->arg, mrq->stop->flags); 166 } 167 168 WARN_ON(!host->claimed); 169 170 led_trigger_event(host->led, LED_FULL); 171 172 mrq->cmd->error = 0; 173 mrq->cmd->mrq = mrq; 174 if (mrq->data) { 175 BUG_ON(mrq->data->blksz > host->max_blk_size); 176 BUG_ON(mrq->data->blocks > host->max_blk_count); 177 BUG_ON(mrq->data->blocks * mrq->data->blksz > 178 host->max_req_size); 179 180#ifdef CONFIG_MMC_DEBUG 181 sz = 0; 182 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) 183 sz += sg->length; 184 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); 185#endif 186 187 mrq->cmd->data = mrq->data; 188 mrq->data->error = 0; 189 mrq->data->mrq = mrq; 190 if (mrq->stop) { 191 mrq->data->stop = mrq->stop; 192 mrq->stop->error = 0; 193 mrq->stop->mrq = mrq; 194 } 195 } 196 mmc_host_clk_ungate(host); 197 host->ops->request(host, mrq); 198} 199 200static void mmc_wait_done(struct mmc_request *mrq) 201{ 202 complete(mrq->done_data); 203} 204 205/** 206 * mmc_wait_for_req - start a request and wait for completion 207 * @host: MMC host to start command 208 * @mrq: MMC request to start 209 * 210 * Start a new MMC custom command request for a host, and wait 211 * for the command to complete. Does not attempt to parse the 212 * response. 213 */ 214void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 215{ 216 DECLARE_COMPLETION_ONSTACK(complete); 217 218 mrq->done_data = &complete; 219 mrq->done = mmc_wait_done; 220 221 mmc_start_request(host, mrq); 222 223 wait_for_completion(&complete); 224} 225 226EXPORT_SYMBOL(mmc_wait_for_req); 227 228/** 229 * mmc_wait_for_cmd - start a command and wait for completion 230 * @host: MMC host to start command 231 * @cmd: MMC command to start 232 * @retries: maximum number of retries 233 * 234 * Start a new MMC command for a host, and wait for the command 235 * to complete. Return any error that occurred while the command 236 * was executing. Do not attempt to parse the response. 237 */ 238int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 239{ 240 struct mmc_request mrq; 241 242 WARN_ON(!host->claimed); 243 244 memset(&mrq, 0, sizeof(struct mmc_request)); 245 246 memset(cmd->resp, 0, sizeof(cmd->resp)); 247 cmd->retries = retries; 248 249 mrq.cmd = cmd; 250 cmd->data = NULL; 251 252 mmc_wait_for_req(host, &mrq); 253 254 return cmd->error; 255} 256 257EXPORT_SYMBOL(mmc_wait_for_cmd); 258 259/** 260 * mmc_set_data_timeout - set the timeout for a data command 261 * @data: data phase for command 262 * @card: the MMC card associated with the data transfer 263 * 264 * Computes the data timeout parameters according to the 265 * correct algorithm given the card type. 266 */ 267void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) 268{ 269 unsigned int mult; 270 271 /* 272 * SDIO cards only define an upper 1 s limit on access. 273 */ 274 if (mmc_card_sdio(card)) { 275 data->timeout_ns = 1000000000; 276 data->timeout_clks = 0; 277 return; 278 } 279 280 /* 281 * SD cards use a 100 multiplier rather than 10 282 */ 283 mult = mmc_card_sd(card) ? 100 : 10; 284 285 /* 286 * Scale up the multiplier (and therefore the timeout) by 287 * the r2w factor for writes. 288 */ 289 if (data->flags & MMC_DATA_WRITE) 290 mult <<= card->csd.r2w_factor; 291 292 data->timeout_ns = card->csd.tacc_ns * mult; 293 data->timeout_clks = card->csd.tacc_clks * mult; 294 295 /* 296 * SD cards also have an upper limit on the timeout. 297 */ 298 if (mmc_card_sd(card)) { 299 unsigned int timeout_us, limit_us; 300 301 timeout_us = data->timeout_ns / 1000; 302 timeout_us += data->timeout_clks * 1000 / 303 (mmc_host_clk_rate(card->host) / 1000); 304 305 if (data->flags & MMC_DATA_WRITE) 306 /* 307 * The limit is really 250 ms, but that is 308 * insufficient for some crappy cards. 309 */ 310 limit_us = 300000; 311 else 312 limit_us = 100000; 313 314 /* 315 * SDHC cards always use these fixed values. 316 */ 317 if (timeout_us > limit_us || mmc_card_blockaddr(card)) { 318 data->timeout_ns = limit_us * 1000; 319 data->timeout_clks = 0; 320 } 321 } 322 /* 323 * Some cards need very high timeouts if driven in SPI mode. 324 * The worst observed timeout was 900ms after writing a 325 * continuous stream of data until the internal logic 326 * overflowed. 327 */ 328 if (mmc_host_is_spi(card->host)) { 329 if (data->flags & MMC_DATA_WRITE) { 330 if (data->timeout_ns < 1000000000) 331 data->timeout_ns = 1000000000; /* 1s */ 332 } else { 333 if (data->timeout_ns < 100000000) 334 data->timeout_ns = 100000000; /* 100ms */ 335 } 336 } 337} 338EXPORT_SYMBOL(mmc_set_data_timeout); 339 340/** 341 * mmc_align_data_size - pads a transfer size to a more optimal value 342 * @card: the MMC card associated with the data transfer 343 * @sz: original transfer size 344 * 345 * Pads the original data size with a number of extra bytes in 346 * order to avoid controller bugs and/or performance hits 347 * (e.g. some controllers revert to PIO for certain sizes). 348 * 349 * Returns the improved size, which might be unmodified. 350 * 351 * Note that this function is only relevant when issuing a 352 * single scatter gather entry. 353 */ 354unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) 355{ 356 /* 357 * FIXME: We don't have a system for the controller to tell 358 * the core about its problems yet, so for now we just 32-bit 359 * align the size. 360 */ 361 sz = ((sz + 3) / 4) * 4; 362 363 return sz; 364} 365EXPORT_SYMBOL(mmc_align_data_size); 366 367/** 368 * mmc_host_enable - enable a host. 369 * @host: mmc host to enable 370 * 371 * Hosts that support power saving can use the 'enable' and 'disable' 372 * methods to exit and enter power saving states. For more information 373 * see comments for struct mmc_host_ops. 374 */ 375int mmc_host_enable(struct mmc_host *host) 376{ 377 if (!(host->caps & MMC_CAP_DISABLE)) 378 return 0; 379 380 if (host->en_dis_recurs) 381 return 0; 382 383 if (host->nesting_cnt++) 384 return 0; 385 386 cancel_delayed_work_sync(&host->disable); 387 388 if (host->enabled) 389 return 0; 390 391 if (host->ops->enable) { 392 int err; 393 394 host->en_dis_recurs = 1; 395 err = host->ops->enable(host); 396 host->en_dis_recurs = 0; 397 398 if (err) { 399 pr_debug("%s: enable error %d\n", 400 mmc_hostname(host), err); 401 return err; 402 } 403 } 404 host->enabled = 1; 405 return 0; 406} 407EXPORT_SYMBOL(mmc_host_enable); 408 409static int mmc_host_do_disable(struct mmc_host *host, int lazy) 410{ 411 if (host->ops->disable) { 412 int err; 413 414 host->en_dis_recurs = 1; 415 err = host->ops->disable(host, lazy); 416 host->en_dis_recurs = 0; 417 418 if (err < 0) { 419 pr_debug("%s: disable error %d\n", 420 mmc_hostname(host), err); 421 return err; 422 } 423 if (err > 0) { 424 unsigned long delay = msecs_to_jiffies(err); 425 426 mmc_schedule_delayed_work(&host->disable, delay); 427 } 428 } 429 host->enabled = 0; 430 return 0; 431} 432 433/** 434 * mmc_host_disable - disable a host. 435 * @host: mmc host to disable 436 * 437 * Hosts that support power saving can use the 'enable' and 'disable' 438 * methods to exit and enter power saving states. For more information 439 * see comments for struct mmc_host_ops. 440 */ 441int mmc_host_disable(struct mmc_host *host) 442{ 443 int err; 444 445 if (!(host->caps & MMC_CAP_DISABLE)) 446 return 0; 447 448 if (host->en_dis_recurs) 449 return 0; 450 451 if (--host->nesting_cnt) 452 return 0; 453 454 if (!host->enabled) 455 return 0; 456 457 err = mmc_host_do_disable(host, 0); 458 return err; 459} 460EXPORT_SYMBOL(mmc_host_disable); 461 462/** 463 * __mmc_claim_host - exclusively claim a host 464 * @host: mmc host to claim 465 * @abort: whether or not the operation should be aborted 466 * 467 * Claim a host for a set of operations. If @abort is non null and 468 * dereference a non-zero value then this will return prematurely with 469 * that non-zero value without acquiring the lock. Returns zero 470 * with the lock held otherwise. 471 */ 472int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) 473{ 474 DECLARE_WAITQUEUE(wait, current); 475 unsigned long flags; 476 int stop; 477 478 might_sleep(); 479 480 add_wait_queue(&host->wq, &wait); 481 spin_lock_irqsave(&host->lock, flags); 482 while (1) { 483 set_current_state(TASK_UNINTERRUPTIBLE); 484 stop = abort ? atomic_read(abort) : 0; 485 if (stop || !host->claimed || host->claimer == current) 486 break; 487 spin_unlock_irqrestore(&host->lock, flags); 488 schedule(); 489 spin_lock_irqsave(&host->lock, flags); 490 } 491 set_current_state(TASK_RUNNING); 492 if (!stop) { 493 host->claimed = 1; 494 host->claimer = current; 495 host->claim_cnt += 1; 496 } else 497 wake_up(&host->wq); 498 spin_unlock_irqrestore(&host->lock, flags); 499 remove_wait_queue(&host->wq, &wait); 500 if (!stop) 501 mmc_host_enable(host); 502 return stop; 503} 504 505EXPORT_SYMBOL(__mmc_claim_host); 506 507/** 508 * mmc_try_claim_host - try exclusively to claim a host 509 * @host: mmc host to claim 510 * 511 * Returns %1 if the host is claimed, %0 otherwise. 512 */ 513int mmc_try_claim_host(struct mmc_host *host) 514{ 515 int claimed_host = 0; 516 unsigned long flags; 517 518 spin_lock_irqsave(&host->lock, flags); 519 if (!host->claimed || host->claimer == current) { 520 host->claimed = 1; 521 host->claimer = current; 522 host->claim_cnt += 1; 523 claimed_host = 1; 524 } 525 spin_unlock_irqrestore(&host->lock, flags); 526 return claimed_host; 527} 528EXPORT_SYMBOL(mmc_try_claim_host); 529 530static void mmc_do_release_host(struct mmc_host *host) 531{ 532 unsigned long flags; 533 534 spin_lock_irqsave(&host->lock, flags); 535 if (--host->claim_cnt) { 536 /* Release for nested claim */ 537 spin_unlock_irqrestore(&host->lock, flags); 538 } else { 539 host->claimed = 0; 540 host->claimer = NULL; 541 spin_unlock_irqrestore(&host->lock, flags); 542 wake_up(&host->wq); 543 } 544} 545 546void mmc_host_deeper_disable(struct work_struct *work) 547{ 548 struct mmc_host *host = 549 container_of(work, struct mmc_host, disable.work); 550 551 /* If the host is claimed then we do not want to disable it anymore */ 552 if (!mmc_try_claim_host(host)) 553 return; 554 mmc_host_do_disable(host, 1); 555 mmc_do_release_host(host); 556} 557 558/** 559 * mmc_host_lazy_disable - lazily disable a host. 560 * @host: mmc host to disable 561 * 562 * Hosts that support power saving can use the 'enable' and 'disable' 563 * methods to exit and enter power saving states. For more information 564 * see comments for struct mmc_host_ops. 565 */ 566int mmc_host_lazy_disable(struct mmc_host *host) 567{ 568 if (!(host->caps & MMC_CAP_DISABLE)) 569 return 0; 570 571 if (host->en_dis_recurs) 572 return 0; 573 574 if (--host->nesting_cnt) 575 return 0; 576 577 if (!host->enabled) 578 return 0; 579 580 if (host->disable_delay) { 581 mmc_schedule_delayed_work(&host->disable, 582 msecs_to_jiffies(host->disable_delay)); 583 return 0; 584 } else 585 return mmc_host_do_disable(host, 1); 586} 587EXPORT_SYMBOL(mmc_host_lazy_disable); 588 589/** 590 * mmc_release_host - release a host 591 * @host: mmc host to release 592 * 593 * Release a MMC host, allowing others to claim the host 594 * for their operations. 595 */ 596void mmc_release_host(struct mmc_host *host) 597{ 598 WARN_ON(!host->claimed); 599 600 mmc_host_lazy_disable(host); 601 602 mmc_do_release_host(host); 603} 604 605EXPORT_SYMBOL(mmc_release_host); 606 607/* 608 * Internal function that does the actual ios call to the host driver, 609 * optionally printing some debug output. 610 */ 611static inline void mmc_set_ios(struct mmc_host *host) 612{ 613 struct mmc_ios *ios = &host->ios; 614 615 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " 616 "width %u timing %u\n", 617 mmc_hostname(host), ios->clock, ios->bus_mode, 618 ios->power_mode, ios->chip_select, ios->vdd, 619 ios->bus_width, ios->timing); 620 621 if (ios->clock > 0) 622 mmc_set_ungated(host); 623 host->ops->set_ios(host, ios); 624} 625 626/* 627 * Control chip select pin on a host. 628 */ 629void mmc_set_chip_select(struct mmc_host *host, int mode) 630{ 631 host->ios.chip_select = mode; 632 mmc_set_ios(host); 633} 634 635/* 636 * Sets the host clock to the highest possible frequency that 637 * is below "hz". 638 */ 639void mmc_set_clock(struct mmc_host *host, unsigned int hz) 640{ 641 WARN_ON(hz < host->f_min); 642 643 if (hz > host->f_max) 644 hz = host->f_max; 645 646 host->ios.clock = hz; 647 mmc_set_ios(host); 648} 649 650#ifdef CONFIG_MMC_CLKGATE 651/* 652 * This gates the clock by setting it to 0 Hz. 653 */ 654void mmc_gate_clock(struct mmc_host *host) 655{ 656 unsigned long flags; 657 658 spin_lock_irqsave(&host->clk_lock, flags); 659 host->clk_old = host->ios.clock; 660 host->ios.clock = 0; 661 host->clk_gated = true; 662 spin_unlock_irqrestore(&host->clk_lock, flags); 663 mmc_set_ios(host); 664} 665 666/* 667 * This restores the clock from gating by using the cached 668 * clock value. 669 */ 670void mmc_ungate_clock(struct mmc_host *host) 671{ 672 /* 673 * We should previously have gated the clock, so the clock shall 674 * be 0 here! The clock may however be 0 during initialization, 675 * when some request operations are performed before setting 676 * the frequency. When ungate is requested in that situation 677 * we just ignore the call. 678 */ 679 if (host->clk_old) { 680 BUG_ON(host->ios.clock); 681 /* This call will also set host->clk_gated to false */ 682 mmc_set_clock(host, host->clk_old); 683 } 684} 685 686void mmc_set_ungated(struct mmc_host *host) 687{ 688 unsigned long flags; 689 690 /* 691 * We've been given a new frequency while the clock is gated, 692 * so make sure we regard this as ungating it. 693 */ 694 spin_lock_irqsave(&host->clk_lock, flags); 695 host->clk_gated = false; 696 spin_unlock_irqrestore(&host->clk_lock, flags); 697} 698 699#else 700void mmc_set_ungated(struct mmc_host *host) 701{ 702} 703#endif 704 705/* 706 * Change the bus mode (open drain/push-pull) of a host. 707 */ 708void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 709{ 710 host->ios.bus_mode = mode; 711 mmc_set_ios(host); 712} 713 714/* 715 * Change data bus width and DDR mode of a host. 716 */ 717void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width, 718 unsigned int ddr) 719{ 720 host->ios.bus_width = width; 721 host->ios.ddr = ddr; 722 mmc_set_ios(host); 723} 724 725/* 726 * Change data bus width of a host. 727 */ 728void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 729{ 730 mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE); 731} 732 733/** 734 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number 735 * @vdd: voltage (mV) 736 * @low_bits: prefer low bits in boundary cases 737 * 738 * This function returns the OCR bit number according to the provided @vdd 739 * value. If conversion is not possible a negative errno value returned. 740 * 741 * Depending on the @low_bits flag the function prefers low or high OCR bits 742 * on boundary voltages. For example, 743 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33); 744 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34); 745 * 746 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21). 747 */ 748static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits) 749{ 750 const int max_bit = ilog2(MMC_VDD_35_36); 751 int bit; 752 753 if (vdd < 1650 || vdd > 3600) 754 return -EINVAL; 755 756 if (vdd >= 1650 && vdd <= 1950) 757 return ilog2(MMC_VDD_165_195); 758 759 if (low_bits) 760 vdd -= 1; 761 762 /* Base 2000 mV, step 100 mV, bit's base 8. */ 763 bit = (vdd - 2000) / 100 + 8; 764 if (bit > max_bit) 765 return max_bit; 766 return bit; 767} 768 769/** 770 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask 771 * @vdd_min: minimum voltage value (mV) 772 * @vdd_max: maximum voltage value (mV) 773 * 774 * This function returns the OCR mask bits according to the provided @vdd_min 775 * and @vdd_max values. If conversion is not possible the function returns 0. 776 * 777 * Notes wrt boundary cases: 778 * This function sets the OCR bits for all boundary voltages, for example 779 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 | 780 * MMC_VDD_34_35 mask. 781 */ 782u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) 783{ 784 u32 mask = 0; 785 786 if (vdd_max < vdd_min) 787 return 0; 788 789 /* Prefer high bits for the boundary vdd_max values. */ 790 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false); 791 if (vdd_max < 0) 792 return 0; 793 794 /* Prefer low bits for the boundary vdd_min values. */ 795 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true); 796 if (vdd_min < 0) 797 return 0; 798 799 /* Fill the mask, from max bit to min bit. */ 800 while (vdd_max >= vdd_min) 801 mask |= 1 << vdd_max--; 802 803 return mask; 804} 805EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); 806 807#ifdef CONFIG_REGULATOR 808 809/** 810 * mmc_regulator_get_ocrmask - return mask of supported voltages 811 * @supply: regulator to use 812 * 813 * This returns either a negative errno, or a mask of voltages that 814 * can be provided to MMC/SD/SDIO devices using the specified voltage 815 * regulator. This would normally be called before registering the 816 * MMC host adapter. 817 */ 818int mmc_regulator_get_ocrmask(struct regulator *supply) 819{ 820 int result = 0; 821 int count; 822 int i; 823 824 count = regulator_count_voltages(supply); 825 if (count < 0) 826 return count; 827 828 for (i = 0; i < count; i++) { 829 int vdd_uV; 830 int vdd_mV; 831 832 vdd_uV = regulator_list_voltage(supply, i); 833 if (vdd_uV <= 0) 834 continue; 835 836 vdd_mV = vdd_uV / 1000; 837 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); 838 } 839 840 return result; 841} 842EXPORT_SYMBOL(mmc_regulator_get_ocrmask); 843 844/** 845 * mmc_regulator_set_ocr - set regulator to match host->ios voltage 846 * @mmc: the host to regulate 847 * @supply: regulator to use 848 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) 849 * 850 * Returns zero on success, else negative errno. 851 * 852 * MMC host drivers may use this to enable or disable a regulator using 853 * a particular supply voltage. This would normally be called from the 854 * set_ios() method. 855 */ 856int mmc_regulator_set_ocr(struct mmc_host *mmc, 857 struct regulator *supply, 858 unsigned short vdd_bit) 859{ 860 int result = 0; 861 int min_uV, max_uV; 862 863 if (vdd_bit) { 864 int tmp; 865 int voltage; 866 867 /* REVISIT mmc_vddrange_to_ocrmask() may have set some 868 * bits this regulator doesn't quite support ... don't 869 * be too picky, most cards and regulators are OK with 870 * a 0.1V range goof (it's a small error percentage). 871 */ 872 tmp = vdd_bit - ilog2(MMC_VDD_165_195); 873 if (tmp == 0) { 874 min_uV = 1650 * 1000; 875 max_uV = 1950 * 1000; 876 } else { 877 min_uV = 1900 * 1000 + tmp * 100 * 1000; 878 max_uV = min_uV + 100 * 1000; 879 } 880 881 /* avoid needless changes to this voltage; the regulator 882 * might not allow this operation 883 */ 884 voltage = regulator_get_voltage(supply); 885 if (voltage < 0) 886 result = voltage; 887 else if (voltage < min_uV || voltage > max_uV) 888 result = regulator_set_voltage(supply, min_uV, max_uV); 889 else 890 result = 0; 891 892 if (result == 0 && !mmc->regulator_enabled) { 893 result = regulator_enable(supply); 894 if (!result) 895 mmc->regulator_enabled = true; 896 } 897 } else if (mmc->regulator_enabled) { 898 result = regulator_disable(supply); 899 if (result == 0) 900 mmc->regulator_enabled = false; 901 } 902 903 if (result) 904 dev_err(mmc_dev(mmc), 905 "could not set regulator OCR (%d)\n", result); 906 return result; 907} 908EXPORT_SYMBOL(mmc_regulator_set_ocr); 909 910#endif /* CONFIG_REGULATOR */ 911 912/* 913 * Mask off any voltages we don't support and select 914 * the lowest voltage 915 */ 916u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) 917{ 918 int bit; 919 920 ocr &= host->ocr_avail; 921 922 bit = ffs(ocr); 923 if (bit) { 924 bit -= 1; 925 926 ocr &= 3 << bit; 927 928 host->ios.vdd = bit; 929 mmc_set_ios(host); 930 } else { 931 pr_warning("%s: host doesn't support card's voltages\n", 932 mmc_hostname(host)); 933 ocr = 0; 934 } 935 936 return ocr; 937} 938 939/* 940 * Select timing parameters for host. 941 */ 942void mmc_set_timing(struct mmc_host *host, unsigned int timing) 943{ 944 host->ios.timing = timing; 945 mmc_set_ios(host); 946} 947 948/* 949 * Apply power to the MMC stack. This is a two-stage process. 950 * First, we enable power to the card without the clock running. 951 * We then wait a bit for the power to stabilise. Finally, 952 * enable the bus drivers and clock to the card. 953 * 954 * We must _NOT_ enable the clock prior to power stablising. 955 * 956 * If a host does all the power sequencing itself, ignore the 957 * initial MMC_POWER_UP stage. 958 */ 959static void mmc_power_up(struct mmc_host *host) 960{ 961 int bit; 962 963 /* If ocr is set, we use it */ 964 if (host->ocr) 965 bit = ffs(host->ocr) - 1; 966 else 967 bit = fls(host->ocr_avail) - 1; 968 969 host->ios.vdd = bit; 970 if (mmc_host_is_spi(host)) { 971 host->ios.chip_select = MMC_CS_HIGH; 972 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 973 } else { 974 host->ios.chip_select = MMC_CS_DONTCARE; 975 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 976 } 977 host->ios.power_mode = MMC_POWER_UP; 978 host->ios.bus_width = MMC_BUS_WIDTH_1; 979 host->ios.timing = MMC_TIMING_LEGACY; 980 mmc_set_ios(host); 981 982 /* 983 * This delay should be sufficient to allow the power supply 984 * to reach the minimum voltage. 985 */ 986 mmc_delay(10); 987 988 host->ios.clock = host->f_init; 989 990 host->ios.power_mode = MMC_POWER_ON; 991 mmc_set_ios(host); 992 993 /* 994 * This delay must be at least 74 clock sizes, or 1 ms, or the 995 * time required to reach a stable voltage. 996 */ 997 mmc_delay(10); 998} 999 1000static void mmc_power_off(struct mmc_host *host) 1001{ 1002 host->ios.clock = 0; 1003 host->ios.vdd = 0; 1004 if (!mmc_host_is_spi(host)) { 1005 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1006 host->ios.chip_select = MMC_CS_DONTCARE; 1007 } 1008 host->ios.power_mode = MMC_POWER_OFF; 1009 host->ios.bus_width = MMC_BUS_WIDTH_1; 1010 host->ios.timing = MMC_TIMING_LEGACY; 1011 mmc_set_ios(host); 1012} 1013 1014/* 1015 * Cleanup when the last reference to the bus operator is dropped. 1016 */ 1017static void __mmc_release_bus(struct mmc_host *host) 1018{ 1019 BUG_ON(!host); 1020 BUG_ON(host->bus_refs); 1021 BUG_ON(!host->bus_dead); 1022 1023 host->bus_ops = NULL; 1024} 1025 1026/* 1027 * Increase reference count of bus operator 1028 */ 1029static inline void mmc_bus_get(struct mmc_host *host) 1030{ 1031 unsigned long flags; 1032 1033 spin_lock_irqsave(&host->lock, flags); 1034 host->bus_refs++; 1035 spin_unlock_irqrestore(&host->lock, flags); 1036} 1037 1038/* 1039 * Decrease reference count of bus operator and free it if 1040 * it is the last reference. 1041 */ 1042static inline void mmc_bus_put(struct mmc_host *host) 1043{ 1044 unsigned long flags; 1045 1046 spin_lock_irqsave(&host->lock, flags); 1047 host->bus_refs--; 1048 if ((host->bus_refs == 0) && host->bus_ops) 1049 __mmc_release_bus(host); 1050 spin_unlock_irqrestore(&host->lock, flags); 1051} 1052 1053/* 1054 * Assign a mmc bus handler to a host. Only one bus handler may control a 1055 * host at any given time. 1056 */ 1057void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) 1058{ 1059 unsigned long flags; 1060 1061 BUG_ON(!host); 1062 BUG_ON(!ops); 1063 1064 WARN_ON(!host->claimed); 1065 1066 spin_lock_irqsave(&host->lock, flags); 1067 1068 BUG_ON(host->bus_ops); 1069 BUG_ON(host->bus_refs); 1070 1071 host->bus_ops = ops; 1072 host->bus_refs = 1; 1073 host->bus_dead = 0; 1074 1075 spin_unlock_irqrestore(&host->lock, flags); 1076} 1077 1078/* 1079 * Remove the current bus handler from a host. Assumes that there are 1080 * no interesting cards left, so the bus is powered down. 1081 */ 1082void mmc_detach_bus(struct mmc_host *host) 1083{ 1084 unsigned long flags; 1085 1086 BUG_ON(!host); 1087 1088 WARN_ON(!host->claimed); 1089 WARN_ON(!host->bus_ops); 1090 1091 spin_lock_irqsave(&host->lock, flags); 1092 1093 host->bus_dead = 1; 1094 1095 spin_unlock_irqrestore(&host->lock, flags); 1096 1097 mmc_power_off(host); 1098 1099 mmc_bus_put(host); 1100} 1101 1102/** 1103 * mmc_detect_change - process change of state on a MMC socket 1104 * @host: host which changed state. 1105 * @delay: optional delay to wait before detection (jiffies) 1106 * 1107 * MMC drivers should call this when they detect a card has been 1108 * inserted or removed. The MMC layer will confirm that any 1109 * present card is still functional, and initialize any newly 1110 * inserted. 1111 */ 1112void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1113{ 1114#ifdef CONFIG_MMC_DEBUG 1115 unsigned long flags; 1116 spin_lock_irqsave(&host->lock, flags); 1117 WARN_ON(host->removed); 1118 spin_unlock_irqrestore(&host->lock, flags); 1119#endif 1120 1121 mmc_schedule_delayed_work(&host->detect, delay); 1122} 1123 1124EXPORT_SYMBOL(mmc_detect_change); 1125 1126void mmc_init_erase(struct mmc_card *card) 1127{ 1128 unsigned int sz; 1129 1130 if (is_power_of_2(card->erase_size)) 1131 card->erase_shift = ffs(card->erase_size) - 1; 1132 else 1133 card->erase_shift = 0; 1134 1135 /* 1136 * It is possible to erase an arbitrarily large area of an SD or MMC 1137 * card. That is not desirable because it can take a long time 1138 * (minutes) potentially delaying more important I/O, and also the 1139 * timeout calculations become increasingly hugely over-estimated. 1140 * Consequently, 'pref_erase' is defined as a guide to limit erases 1141 * to that size and alignment. 1142 * 1143 * For SD cards that define Allocation Unit size, limit erases to one 1144 * Allocation Unit at a time. For MMC cards that define High Capacity 1145 * Erase Size, whether it is switched on or not, limit to that size. 1146 * Otherwise just have a stab at a good value. For modern cards it 1147 * will end up being 4MiB. Note that if the value is too small, it 1148 * can end up taking longer to erase. 1149 */ 1150 if (mmc_card_sd(card) && card->ssr.au) { 1151 card->pref_erase = card->ssr.au; 1152 card->erase_shift = ffs(card->ssr.au) - 1; 1153 } else if (card->ext_csd.hc_erase_size) { 1154 card->pref_erase = card->ext_csd.hc_erase_size; 1155 } else { 1156 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; 1157 if (sz < 128) 1158 card->pref_erase = 512 * 1024 / 512; 1159 else if (sz < 512) 1160 card->pref_erase = 1024 * 1024 / 512; 1161 else if (sz < 1024) 1162 card->pref_erase = 2 * 1024 * 1024 / 512; 1163 else 1164 card->pref_erase = 4 * 1024 * 1024 / 512; 1165 if (card->pref_erase < card->erase_size) 1166 card->pref_erase = card->erase_size; 1167 else { 1168 sz = card->pref_erase % card->erase_size; 1169 if (sz) 1170 card->pref_erase += card->erase_size - sz; 1171 } 1172 } 1173} 1174 1175static void mmc_set_mmc_erase_timeout(struct mmc_card *card, 1176 struct mmc_command *cmd, 1177 unsigned int arg, unsigned int qty) 1178{ 1179 unsigned int erase_timeout; 1180 1181 if (card->ext_csd.erase_group_def & 1) { 1182 /* High Capacity Erase Group Size uses HC timeouts */ 1183 if (arg == MMC_TRIM_ARG) 1184 erase_timeout = card->ext_csd.trim_timeout; 1185 else 1186 erase_timeout = card->ext_csd.hc_erase_timeout; 1187 } else { 1188 /* CSD Erase Group Size uses write timeout */ 1189 unsigned int mult = (10 << card->csd.r2w_factor); 1190 unsigned int timeout_clks = card->csd.tacc_clks * mult; 1191 unsigned int timeout_us; 1192 1193 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ 1194 if (card->csd.tacc_ns < 1000000) 1195 timeout_us = (card->csd.tacc_ns * mult) / 1000; 1196 else 1197 timeout_us = (card->csd.tacc_ns / 1000) * mult; 1198 1199 /* 1200 * ios.clock is only a target. The real clock rate might be 1201 * less but not that much less, so fudge it by multiplying by 2. 1202 */ 1203 timeout_clks <<= 1; 1204 timeout_us += (timeout_clks * 1000) / 1205 (card->host->ios.clock / 1000); 1206 1207 erase_timeout = timeout_us / 1000; 1208 1209 /* 1210 * Theoretically, the calculation could underflow so round up 1211 * to 1ms in that case. 1212 */ 1213 if (!erase_timeout) 1214 erase_timeout = 1; 1215 } 1216 1217 /* Multiplier for secure operations */ 1218 if (arg & MMC_SECURE_ARGS) { 1219 if (arg == MMC_SECURE_ERASE_ARG) 1220 erase_timeout *= card->ext_csd.sec_erase_mult; 1221 else 1222 erase_timeout *= card->ext_csd.sec_trim_mult; 1223 } 1224 1225 erase_timeout *= qty; 1226 1227 /* 1228 * Ensure at least a 1 second timeout for SPI as per 1229 * 'mmc_set_data_timeout()' 1230 */ 1231 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) 1232 erase_timeout = 1000; 1233 1234 cmd->erase_timeout = erase_timeout; 1235} 1236 1237static void mmc_set_sd_erase_timeout(struct mmc_card *card, 1238 struct mmc_command *cmd, unsigned int arg, 1239 unsigned int qty) 1240{ 1241 if (card->ssr.erase_timeout) { 1242 /* Erase timeout specified in SD Status Register (SSR) */ 1243 cmd->erase_timeout = card->ssr.erase_timeout * qty + 1244 card->ssr.erase_offset; 1245 } else { 1246 /* 1247 * Erase timeout not specified in SD Status Register (SSR) so 1248 * use 250ms per write block. 1249 */ 1250 cmd->erase_timeout = 250 * qty; 1251 } 1252 1253 /* Must not be less than 1 second */ 1254 if (cmd->erase_timeout < 1000) 1255 cmd->erase_timeout = 1000; 1256} 1257 1258static void mmc_set_erase_timeout(struct mmc_card *card, 1259 struct mmc_command *cmd, unsigned int arg, 1260 unsigned int qty) 1261{ 1262 if (mmc_card_sd(card)) 1263 mmc_set_sd_erase_timeout(card, cmd, arg, qty); 1264 else 1265 mmc_set_mmc_erase_timeout(card, cmd, arg, qty); 1266} 1267 1268static int mmc_do_erase(struct mmc_card *card, unsigned int from, 1269 unsigned int to, unsigned int arg) 1270{ 1271 struct mmc_command cmd; 1272 unsigned int qty = 0; 1273 int err; 1274 1275 /* 1276 * qty is used to calculate the erase timeout which depends on how many 1277 * erase groups (or allocation units in SD terminology) are affected. 1278 * We count erasing part of an erase group as one erase group. 1279 * For SD, the allocation units are always a power of 2. For MMC, the 1280 * erase group size is almost certainly also power of 2, but it does not 1281 * seem to insist on that in the JEDEC standard, so we fall back to 1282 * division in that case. SD may not specify an allocation unit size, 1283 * in which case the timeout is based on the number of write blocks. 1284 * 1285 * Note that the timeout for secure trim 2 will only be correct if the 1286 * number of erase groups specified is the same as the total of all 1287 * preceding secure trim 1 commands. Since the power may have been 1288 * lost since the secure trim 1 commands occurred, it is generally 1289 * impossible to calculate the secure trim 2 timeout correctly. 1290 */ 1291 if (card->erase_shift) 1292 qty += ((to >> card->erase_shift) - 1293 (from >> card->erase_shift)) + 1; 1294 else if (mmc_card_sd(card)) 1295 qty += to - from + 1; 1296 else 1297 qty += ((to / card->erase_size) - 1298 (from / card->erase_size)) + 1; 1299 1300 if (!mmc_card_blockaddr(card)) { 1301 from <<= 9; 1302 to <<= 9; 1303 } 1304 1305 memset(&cmd, 0, sizeof(struct mmc_command)); 1306 if (mmc_card_sd(card)) 1307 cmd.opcode = SD_ERASE_WR_BLK_START; 1308 else 1309 cmd.opcode = MMC_ERASE_GROUP_START; 1310 cmd.arg = from; 1311 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1312 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1313 if (err) { 1314 printk(KERN_ERR "mmc_erase: group start error %d, " 1315 "status %#x\n", err, cmd.resp[0]); 1316 err = -EINVAL; 1317 goto out; 1318 } 1319 1320 memset(&cmd, 0, sizeof(struct mmc_command)); 1321 if (mmc_card_sd(card)) 1322 cmd.opcode = SD_ERASE_WR_BLK_END; 1323 else 1324 cmd.opcode = MMC_ERASE_GROUP_END; 1325 cmd.arg = to; 1326 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1327 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1328 if (err) { 1329 printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n", 1330 err, cmd.resp[0]); 1331 err = -EINVAL; 1332 goto out; 1333 } 1334 1335 memset(&cmd, 0, sizeof(struct mmc_command)); 1336 cmd.opcode = MMC_ERASE; 1337 cmd.arg = arg; 1338 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1339 mmc_set_erase_timeout(card, &cmd, arg, qty); 1340 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1341 if (err) { 1342 printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", 1343 err, cmd.resp[0]); 1344 err = -EIO; 1345 goto out; 1346 } 1347 1348 if (mmc_host_is_spi(card->host)) 1349 goto out; 1350 1351 do { 1352 memset(&cmd, 0, sizeof(struct mmc_command)); 1353 cmd.opcode = MMC_SEND_STATUS; 1354 cmd.arg = card->rca << 16; 1355 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1356 /* Do not retry else we can't see errors */ 1357 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1358 if (err || (cmd.resp[0] & 0xFDF92000)) { 1359 printk(KERN_ERR "error %d requesting status %#x\n", 1360 err, cmd.resp[0]); 1361 err = -EIO; 1362 goto out; 1363 } 1364 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 1365 R1_CURRENT_STATE(cmd.resp[0]) == 7); 1366out: 1367 return err; 1368} 1369 1370/** 1371 * mmc_erase - erase sectors. 1372 * @card: card to erase 1373 * @from: first sector to erase 1374 * @nr: number of sectors to erase 1375 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG) 1376 * 1377 * Caller must claim host before calling this function. 1378 */ 1379int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, 1380 unsigned int arg) 1381{ 1382 unsigned int rem, to = from + nr; 1383 1384 if (!(card->host->caps & MMC_CAP_ERASE) || 1385 !(card->csd.cmdclass & CCC_ERASE)) 1386 return -EOPNOTSUPP; 1387 1388 if (!card->erase_size) 1389 return -EOPNOTSUPP; 1390 1391 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) 1392 return -EOPNOTSUPP; 1393 1394 if ((arg & MMC_SECURE_ARGS) && 1395 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) 1396 return -EOPNOTSUPP; 1397 1398 if ((arg & MMC_TRIM_ARGS) && 1399 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) 1400 return -EOPNOTSUPP; 1401 1402 if (arg == MMC_SECURE_ERASE_ARG) { 1403 if (from % card->erase_size || nr % card->erase_size) 1404 return -EINVAL; 1405 } 1406 1407 if (arg == MMC_ERASE_ARG) { 1408 rem = from % card->erase_size; 1409 if (rem) { 1410 rem = card->erase_size - rem; 1411 from += rem; 1412 if (nr > rem) 1413 nr -= rem; 1414 else 1415 return 0; 1416 } 1417 rem = nr % card->erase_size; 1418 if (rem) 1419 nr -= rem; 1420 } 1421 1422 if (nr == 0) 1423 return 0; 1424 1425 to = from + nr; 1426 1427 if (to <= from) 1428 return -EINVAL; 1429 1430 /* 'from' and 'to' are inclusive */ 1431 to -= 1; 1432 1433 return mmc_do_erase(card, from, to, arg); 1434} 1435EXPORT_SYMBOL(mmc_erase); 1436 1437int mmc_can_erase(struct mmc_card *card) 1438{ 1439 if ((card->host->caps & MMC_CAP_ERASE) && 1440 (card->csd.cmdclass & CCC_ERASE) && card->erase_size) 1441 return 1; 1442 return 0; 1443} 1444EXPORT_SYMBOL(mmc_can_erase); 1445 1446int mmc_can_trim(struct mmc_card *card) 1447{ 1448 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 1449 return 1; 1450 return 0; 1451} 1452EXPORT_SYMBOL(mmc_can_trim); 1453 1454int mmc_can_secure_erase_trim(struct mmc_card *card) 1455{ 1456 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) 1457 return 1; 1458 return 0; 1459} 1460EXPORT_SYMBOL(mmc_can_secure_erase_trim); 1461 1462int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, 1463 unsigned int nr) 1464{ 1465 if (!card->erase_size) 1466 return 0; 1467 if (from % card->erase_size || nr % card->erase_size) 1468 return 0; 1469 return 1; 1470} 1471EXPORT_SYMBOL(mmc_erase_group_aligned); 1472 1473int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 1474{ 1475 struct mmc_command cmd; 1476 1477 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) 1478 return 0; 1479 1480 memset(&cmd, 0, sizeof(struct mmc_command)); 1481 cmd.opcode = MMC_SET_BLOCKLEN; 1482 cmd.arg = blocklen; 1483 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1484 return mmc_wait_for_cmd(card->host, &cmd, 5); 1485} 1486EXPORT_SYMBOL(mmc_set_blocklen); 1487 1488static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) 1489{ 1490 host->f_init = freq; 1491 1492#ifdef CONFIG_MMC_DEBUG 1493 pr_info("%s: %s: trying to init card at %u Hz\n", 1494 mmc_hostname(host), __func__, host->f_init); 1495#endif 1496 mmc_power_up(host); 1497 sdio_reset(host); 1498 mmc_go_idle(host); 1499 1500 mmc_send_if_cond(host, host->ocr_avail); 1501 1502 /* Order's important: probe SDIO, then SD, then MMC */ 1503 if (!mmc_attach_sdio(host)) 1504 return 0; 1505 if (!mmc_attach_sd(host)) 1506 return 0; 1507 if (!mmc_attach_mmc(host)) 1508 return 0; 1509 1510 mmc_power_off(host); 1511 return -EIO; 1512} 1513 1514void mmc_rescan(struct work_struct *work) 1515{ 1516 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 1517 struct mmc_host *host = 1518 container_of(work, struct mmc_host, detect.work); 1519 int i; 1520 1521 if (host->rescan_disable) 1522 return; 1523 1524 mmc_bus_get(host); 1525 1526 /* 1527 * if there is a _removable_ card registered, check whether it is 1528 * still present 1529 */ 1530 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 1531 && mmc_card_is_removable(host)) 1532 host->bus_ops->detect(host); 1533 1534 mmc_bus_put(host); 1535 1536 1537 mmc_bus_get(host); 1538 1539 /* if there still is a card present, stop here */ 1540 if (host->bus_ops != NULL) { 1541 mmc_bus_put(host); 1542 goto out; 1543 } 1544 1545 /* 1546 * Only we can add a new handler, so it's safe to 1547 * release the lock here. 1548 */ 1549 mmc_bus_put(host); 1550 1551 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 1552 goto out; 1553 1554 mmc_claim_host(host); 1555 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 1556 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 1557 break; 1558 if (freqs[i] < host->f_min) 1559 break; 1560 } 1561 mmc_release_host(host); 1562 1563 out: 1564 if (host->caps & MMC_CAP_NEEDS_POLL) 1565 mmc_schedule_delayed_work(&host->detect, HZ); 1566} 1567 1568void mmc_start_host(struct mmc_host *host) 1569{ 1570 mmc_power_off(host); 1571 mmc_detect_change(host, 0); 1572} 1573 1574void mmc_stop_host(struct mmc_host *host) 1575{ 1576#ifdef CONFIG_MMC_DEBUG 1577 unsigned long flags; 1578 spin_lock_irqsave(&host->lock, flags); 1579 host->removed = 1; 1580 spin_unlock_irqrestore(&host->lock, flags); 1581#endif 1582 1583 if (host->caps & MMC_CAP_DISABLE) 1584 cancel_delayed_work(&host->disable); 1585 cancel_delayed_work_sync(&host->detect); 1586 mmc_flush_scheduled_work(); 1587 1588 /* clear pm flags now and let card drivers set them as needed */ 1589 host->pm_flags = 0; 1590 1591 mmc_bus_get(host); 1592 if (host->bus_ops && !host->bus_dead) { 1593 if (host->bus_ops->remove) 1594 host->bus_ops->remove(host); 1595 1596 mmc_claim_host(host); 1597 mmc_detach_bus(host); 1598 mmc_release_host(host); 1599 mmc_bus_put(host); 1600 return; 1601 } 1602 mmc_bus_put(host); 1603 1604 BUG_ON(host->card); 1605 1606 mmc_power_off(host); 1607} 1608 1609int mmc_power_save_host(struct mmc_host *host) 1610{ 1611 int ret = 0; 1612 1613 mmc_bus_get(host); 1614 1615 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1616 mmc_bus_put(host); 1617 return -EINVAL; 1618 } 1619 1620 if (host->bus_ops->power_save) 1621 ret = host->bus_ops->power_save(host); 1622 1623 mmc_bus_put(host); 1624 1625 mmc_power_off(host); 1626 1627 return ret; 1628} 1629EXPORT_SYMBOL(mmc_power_save_host); 1630 1631int mmc_power_restore_host(struct mmc_host *host) 1632{ 1633 int ret; 1634 1635 mmc_bus_get(host); 1636 1637 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1638 mmc_bus_put(host); 1639 return -EINVAL; 1640 } 1641 1642 mmc_power_up(host); 1643 ret = host->bus_ops->power_restore(host); 1644 1645 mmc_bus_put(host); 1646 1647 return ret; 1648} 1649EXPORT_SYMBOL(mmc_power_restore_host); 1650 1651int mmc_card_awake(struct mmc_host *host) 1652{ 1653 int err = -ENOSYS; 1654 1655 mmc_bus_get(host); 1656 1657 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 1658 err = host->bus_ops->awake(host); 1659 1660 mmc_bus_put(host); 1661 1662 return err; 1663} 1664EXPORT_SYMBOL(mmc_card_awake); 1665 1666int mmc_card_sleep(struct mmc_host *host) 1667{ 1668 int err = -ENOSYS; 1669 1670 mmc_bus_get(host); 1671 1672 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 1673 err = host->bus_ops->sleep(host); 1674 1675 mmc_bus_put(host); 1676 1677 return err; 1678} 1679EXPORT_SYMBOL(mmc_card_sleep); 1680 1681int mmc_card_can_sleep(struct mmc_host *host) 1682{ 1683 struct mmc_card *card = host->card; 1684 1685 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) 1686 return 1; 1687 return 0; 1688} 1689EXPORT_SYMBOL(mmc_card_can_sleep); 1690 1691#ifdef CONFIG_PM 1692 1693/** 1694 * mmc_suspend_host - suspend a host 1695 * @host: mmc host 1696 */ 1697int mmc_suspend_host(struct mmc_host *host) 1698{ 1699 int err = 0; 1700 1701 if (host->caps & MMC_CAP_DISABLE) 1702 cancel_delayed_work(&host->disable); 1703 cancel_delayed_work(&host->detect); 1704 mmc_flush_scheduled_work(); 1705 1706 mmc_bus_get(host); 1707 if (host->bus_ops && !host->bus_dead) { 1708 if (host->bus_ops->suspend) 1709 err = host->bus_ops->suspend(host); 1710 if (err == -ENOSYS || !host->bus_ops->resume) { 1711 /* 1712 * We simply "remove" the card in this case. 1713 * It will be redetected on resume. 1714 */ 1715 if (host->bus_ops->remove) 1716 host->bus_ops->remove(host); 1717 mmc_claim_host(host); 1718 mmc_detach_bus(host); 1719 mmc_release_host(host); 1720 host->pm_flags = 0; 1721 err = 0; 1722 } 1723 } 1724 mmc_bus_put(host); 1725 1726 if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER)) 1727 mmc_power_off(host); 1728 1729 return err; 1730} 1731 1732EXPORT_SYMBOL(mmc_suspend_host); 1733 1734/** 1735 * mmc_resume_host - resume a previously suspended host 1736 * @host: mmc host 1737 */ 1738int mmc_resume_host(struct mmc_host *host) 1739{ 1740 int err = 0; 1741 1742 mmc_bus_get(host); 1743 if (host->bus_ops && !host->bus_dead) { 1744 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { 1745 mmc_power_up(host); 1746 mmc_select_voltage(host, host->ocr); 1747 /* 1748 * Tell runtime PM core we just powered up the card, 1749 * since it still believes the card is powered off. 1750 * Note that currently runtime PM is only enabled 1751 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD 1752 */ 1753 if (mmc_card_sdio(host->card) && 1754 (host->caps & MMC_CAP_POWER_OFF_CARD)) { 1755 pm_runtime_disable(&host->card->dev); 1756 pm_runtime_set_active(&host->card->dev); 1757 pm_runtime_enable(&host->card->dev); 1758 } 1759 } 1760 BUG_ON(!host->bus_ops->resume); 1761 err = host->bus_ops->resume(host); 1762 if (err) { 1763 printk(KERN_WARNING "%s: error %d during resume " 1764 "(card was removed?)\n", 1765 mmc_hostname(host), err); 1766 err = 0; 1767 } 1768 } 1769 mmc_bus_put(host); 1770 1771 return err; 1772} 1773EXPORT_SYMBOL(mmc_resume_host); 1774 1775/* Do the card removal on suspend if card is assumed removeable 1776 * Do that in pm notifier while userspace isn't yet frozen, so we will be able 1777 to sync the card. 1778*/ 1779int mmc_pm_notify(struct notifier_block *notify_block, 1780 unsigned long mode, void *unused) 1781{ 1782 struct mmc_host *host = container_of( 1783 notify_block, struct mmc_host, pm_notify); 1784 unsigned long flags; 1785 1786 1787 switch (mode) { 1788 case PM_HIBERNATION_PREPARE: 1789 case PM_SUSPEND_PREPARE: 1790 1791 spin_lock_irqsave(&host->lock, flags); 1792 host->rescan_disable = 1; 1793 spin_unlock_irqrestore(&host->lock, flags); 1794 cancel_delayed_work_sync(&host->detect); 1795 1796 if (!host->bus_ops || host->bus_ops->suspend) 1797 break; 1798 1799 mmc_claim_host(host); 1800 1801 if (host->bus_ops->remove) 1802 host->bus_ops->remove(host); 1803 1804 mmc_detach_bus(host); 1805 mmc_release_host(host); 1806 host->pm_flags = 0; 1807 break; 1808 1809 case PM_POST_SUSPEND: 1810 case PM_POST_HIBERNATION: 1811 case PM_POST_RESTORE: 1812 1813 spin_lock_irqsave(&host->lock, flags); 1814 host->rescan_disable = 0; 1815 spin_unlock_irqrestore(&host->lock, flags); 1816 mmc_detect_change(host, 0); 1817 1818 } 1819 1820 return 0; 1821} 1822#endif 1823 1824static int __init mmc_init(void) 1825{ 1826 int ret; 1827 1828 workqueue = alloc_ordered_workqueue("kmmcd", 0); 1829 if (!workqueue) 1830 return -ENOMEM; 1831 1832 ret = mmc_register_bus(); 1833 if (ret) 1834 goto destroy_workqueue; 1835 1836 ret = mmc_register_host_class(); 1837 if (ret) 1838 goto unregister_bus; 1839 1840 ret = sdio_register_bus(); 1841 if (ret) 1842 goto unregister_host_class; 1843 1844 return 0; 1845 1846unregister_host_class: 1847 mmc_unregister_host_class(); 1848unregister_bus: 1849 mmc_unregister_bus(); 1850destroy_workqueue: 1851 destroy_workqueue(workqueue); 1852 1853 return ret; 1854} 1855 1856static void __exit mmc_exit(void) 1857{ 1858 sdio_unregister_bus(); 1859 mmc_unregister_host_class(); 1860 mmc_unregister_bus(); 1861 destroy_workqueue(workqueue); 1862} 1863 1864subsys_initcall(mmc_init); 1865module_exit(mmc_exit); 1866 1867MODULE_LICENSE("GPL"); 1868