block.c revision 4d6144de8ba263eb3691a737c547e5b2fdc45287
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
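	/*
	 * reset_done is a bitmask of the request types above: a bit is
	 * set when a host/card reset has been attempted for that type
	 * of request and cleared again once such a request succeeds,
	 * so each type triggers at most one reset per failure streak
	 * (see mmc_blk_reset() and mmc_blk_reset_success()).
	 */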
	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
};

static DEFINE_MUTEX(open_lock);

enum mmc_blk_status {
	MMC_BLK_SUCCESS = 0,
	MMC_BLK_PARTIAL,
	MMC_BLK_CMD_ERR,
	MMC_BLK_RETRY,
	MMC_BLK_ABORT,
	MMC_BLK_DATA_ERR,
	MMC_BLK_ECC_ERR,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
	return 0;
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* The card has no real CHS geometry; fabricate one for tools
	 * that still expect it. */
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}
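	/*
	 * blksz and blocks come straight from userspace.  Do the size
	 * arithmetic in 64 bits so a large product cannot wrap before
	 * it is checked against MMC_IOC_MAX_BYTES below, which bounds
	 * the kernel bounce buffer allocated for the transfer.
	 */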
	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_done;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
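	/*
	 * The raw response words are always copied back so userspace
	 * can decode the R1/R2 bits for the opcode it issued; the data
	 * buffer is copied back further below, and only for reads.
	 */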
	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
				 idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
	kfree(idata->buf);
	kfree(idata);
	return err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
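/*
 * Ask an SD card how many blocks of the preceding (failed) multi-block
 * write actually reached the medium, using ACMD22 (SEND_NUM_WR_BLOCKS).
 * Returns the number of written blocks, or (u32)-1 on any failure.
 */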
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	unsigned int timeout_us;

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
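/*
 * Error disposition used by the recovery helpers below: retry the r/w
 * command, abort the request, or continue and leave the outcome to the
 * normal data error handling.
 */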
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
			     bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "response CRC error",
		       name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (e.g., issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data
 *   transfer mode.  Try to send it a stop command.  If this fails, we
 *   can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
				struct mmc_blk_request *brq, int *ecc_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err)
		return ERR_ABORT;

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
					 prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
					 prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
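/*
 * Pick the cheapest erase-class operation the card supports: DISCARD
 * where available, then TRIM, falling back to a full erase-group
 * ERASE.  (DISCARD and TRIM operate on write blocks; ERASE works on
 * whole erase groups.)
 */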
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* The sanitize operation is supported at v4.5 only */
	if (mmc_can_sanitize(card)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_SANITIZE_START, 1, 0);
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out;
		}
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
	}
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	spin_lock_irq(&md->lock);
	__blk_end_request_all(req, ret);
	spin_unlock_irq(&md->lock);

	return ret ? 0 : 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, and we have to wait for it to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}
	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 *
	 * XXX: this really needs a good explanation of why REQ_META
	 * is treated specially.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
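	/*
	 * Older, non high-capacity cards are byte addressed rather than
	 * sector addressed: convert the 512-byte sector number into a
	 * byte offset for them.
	 */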
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway.  This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host.  If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly.  This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */

	if ((md->flags & MMC_BLK_CMD23) &&
	    mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0); /* bit 31: reliable write request */
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}
	return ret;
}
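/*
 * Issue the next read/write request and reap the previous one:
 * mmc_start_req() hands the new request to the host and returns the
 * previously started async request together with its status, so the
 * preparation of one transfer overlaps the completion of another.
 */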
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req;
	struct mmc_async_req *areq;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	do {
		if (rqc) {
			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq)
			return 0;

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0,
						brq->data.bytes_xfered);
			spin_unlock_irq(&md->lock);
			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV)
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warning("%s: retrying using single block read\n",
					   req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, -EIO,
						brq->data.blksz);
			spin_unlock_irq(&md->lock);
			if (!ret)
				goto start_new_req;
			break;
		}

		if (ret) {
			/*
			 * In case of an incomplete request,
			 * prepare it again and resend.
			 */
			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
		}
	} while (ret);

	return 1;
 cmd_abort:
	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

 start_new_req:
	if (rqc) {
		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
	}

	return 0;
}

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_claim_host(card->host);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			spin_lock_irq(&md->lock);
			__blk_end_request_all(req, -EIO);
			spin_unlock_irq(&md->lock);
		}
		ret = 0;
		goto out;
	}

	if (req && req->cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req && req->cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if (!req)
		/* release host only when there are no more requests */
		mmc_release_host(card->host);
	return ret;
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata.  Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;
	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	blk_queue_logical_block_size(md->queue.queue, 512);
	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
	return md;
}

static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
		part_md->disk->disk_name, mmc_card_id(card),
		mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
/*
 * MMC physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_set_blocklen(card, 512);
	mmc_release_host(card->host);

	if (err) {
		pr_err("%s: unable to set block size to 512: %d\n",
		       md->disk->disk_name, err);
		return -EINVAL;
	}

	return 0;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	if (md) {
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);

			/* Stop new requests from getting into the queue */
			del_gendisk(md->disk);
		}

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);
		mmc_blk_put(md);
	}
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}

static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		del_gendisk(md->disk);

	return ret;
}

#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13

static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers.  For now we'll
	 * blacklist what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some Micron MMC cards need a longer data read timeout than
	 * indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	END_FIXUP
};

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	int err;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define	mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");