block.c revision 6de5fc9cf7de334912de4cfd2d06eb2d744d2afe
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so the number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
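
/*
 * Worked example (using the common CONFIG_MMC_BLOCK_MINORS value of 8):
 * max_devices = 256 / 8 = 32 cards, each getting 8 minors - one for the
 * whole device plus up to 7 disk partitions.
 */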

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
};

static DEFINE_MUTEX(open_lock);

enum mmc_blk_status {
	MMC_BLK_SUCCESS = 0,
	MMC_BLK_PARTIAL,
	MMC_BLK_CMD_ERR,
	MMC_BLK_RETRY,
	MMC_BLK_ABORT,
	MMC_BLK_DATA_ERR,
	MMC_BLK_ECC_ERR,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
	return 0;
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
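
/*
 * Illustrative userspace sketch (an assumption, not shipped with this
 * driver): a CAP_SYS_RAWIO process could issue CMD13 (SEND_STATUS)
 * through the handler below roughly as follows, with the response flags
 * taken from the kernel's <linux/mmc/core.h> definitions:
 *
 *	struct mmc_ioc_cmd ic = {0};
 *	int fd = open("/dev/mmcblk0", O_RDWR);
 *	ic.opcode = 13;
 *	ic.arg    = rca << 16;
 *	ic.flags  = MMC_RSP_R1 | MMC_CMD_AC;
 *	if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *		status = ic.response[0];
 */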
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	data.sg = &sg;
	data.sg_len = 1;
	data.blksz = idata->ic.blksz;
	data.blocks = idata->ic.blocks;

	sg_init_one(data.sg, idata->buf, idata->buf_bytes);

	if (idata->ic.write_flag)
		data.flags = MMC_DATA_WRITE;
	else
		data.flags = MMC_DATA_READ;

	mrq.cmd = &cmd;
	mrq.data = &data;

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_done;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_claim_host(card->host);

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	/* data.flags must already be set before doing this. */
	mmc_set_data_timeout(&data, card);
	/* Allow overriding the timeout_ns for empirical tuning. */
	if (idata->ic.data_timeout_ns)
		data.timeout_ns = idata->ic.data_timeout_ns;

	if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
		/*
		 * Pretend this is a data transfer and rely on the host driver
		 * to compute timeout.  When all host drivers support
		 * cmd.cmd_timeout for R1B, this can be changed to:
		 *
		 *     mrq.data = NULL;
		 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
		 */
		data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
				 idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
	kfree(idata->buf);
	kfree(idata);
	return err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
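
/*
 * Select which internal eMMC partition (user area, boot or general
 * purpose) subsequent commands address.  On MMC this is done by
 * rewriting the PARTITION_ACCESS bits (masked by
 * EXT_CSD_PART_CONFIG_ACC_MASK) of the EXT_CSD PARTITION_CONFIG byte.
 */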
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
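
/*
 * Ask an SD card how many blocks it wrote successfully in the last data
 * transfer: ACMD22 (SEND_NUM_WR_BLOCKS) returns the count as a single
 * big-endian 32-bit word, hence the ntohl() below.  (u32)-1 serves as
 * the "don't know" value on any failure.
 */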
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	unsigned int timeout_us;

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
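
/*
 * The status words obtained above follow the SD/MMC R1 layout:
 * R1_CURRENT_STATE() extracts the CURRENT_STATE field (bits 12:9),
 * where e.g. 4 = tran, 5 = data, 6 = rcv and 7 = prg.  The recovery
 * code below keys off those states to decide whether a stop command
 * is needed.
 */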

#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err)
		return ERR_ABORT;

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}
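
/*
 * Try to recover from a fatal error by power-cycling the card.  The
 * reset_done bitmask (one MMC_BLK_* bit per request type) makes this a
 * once-per-failure affair: a second reset attempt for the same request
 * type fails with -EEXIST until mmc_blk_reset_success() clears the bit.
 */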
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* The sanitize operation is supported by v4.5+ cards only */
	if (mmc_can_sanitize(card)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_SANITIZE_START, 1, 0);
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out;
		}
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
	}
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	spin_lock_irq(&md->lock);
	__blk_end_request_all(req, ret);
	spin_unlock_irq(&md->lock);

	return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we then have to wait for to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
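
/*
 * Translate a block-layer request into the mmc_blk_request handed to the
 * host: pick single- vs multi-block opcodes, set up the optional
 * SET_BLOCK_COUNT (sbc) and STOP_TRANSMISSION commands, map the
 * scatterlist and compute the data timeout.
 */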
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 *
	 * XXX: this really needs a good explanation of why REQ_META
	 * is treated special.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for requests
	 * that are too big.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway.  This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host.  If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly.  This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */

	if ((md->flags & MMC_BLK_CMD23) &&
	    mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}
	return ret;
}
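
/*
 * Main read/write issue loop.  The queue keeps two request slots
 * (mqrq_cur and mqrq_prev) so that while one request executes on the
 * host, the next is already prepared; mmc_start_req() returns the
 * *completed* previous request, whose status is then handled below.
 */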
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req;
	struct mmc_async_req *areq;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	do {
		if (rqc) {
			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq)
			return 0;

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0,
						brq->data.bytes_xfered);
			spin_unlock_irq(&md->lock);
			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV)
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warning("%s: retrying using single block read\n",
					   req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, -EIO,
						brq->data.blksz);
			spin_unlock_irq(&md->lock);
			if (!ret)
				goto start_new_req;
			break;
		}

		if (ret) {
			/*
			 * In case of an incomplete request
			 * prepare it again and resend.
			 */
			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
		}
	} while (ret);

	return 1;

 cmd_abort:
	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

 start_new_req:
	if (rqc) {
		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
	}

	return 0;
}

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_claim_host(card->host);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			spin_lock_irq(&md->lock);
			__blk_end_request_all(req, -EIO);
			spin_unlock_irq(&md->lock);
		}
		ret = 0;
		goto out;
	}

	if (req && req->cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req && req->cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if (!req)
		/* release host only when there are no more requests */
		mmc_release_host(card->host);
	return ret;
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
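
/*
 * Resulting device names follow the "mmcblk%d%s" pattern used below:
 * the main area of the first card becomes mmcblk0 and, assuming the
 * usual partition names reported by the core, its hardware boot
 * partitions appear as e.g. mmcblk0boot0 and mmcblk0boot1.
 */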
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata.  Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	blk_queue_logical_block_size(md->queue.queue, 512);
	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
	return md;
}

static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
		part_md->disk->disk_name, mmc_card_id(card),
		mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}

/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_set_blocklen(card, 512);
	mmc_release_host(card->host);

	if (err) {
		pr_err("%s: unable to set block size to 512: %d\n",
		       md->disk->disk_name, err);
		return -EINVAL;
	}

	return 0;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	if (md) {
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);

			/* Stop new requests from getting into the queue */
			del_gendisk(md->disk);
		}

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);
		mmc_blk_put(md);
	}
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}

static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		del_gendisk(md->disk);

	return ret;
}
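
/*
 * Per-card quirk table.  Each MMC_FIXUP() entry matches on the CID
 * product name plus the manufacturer and OEM IDs, and applies the given
 * quirk flag to matching cards at probe time via mmc_fixup_device().
 */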
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers.  For now we'll
	 * blacklist what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some Micron MMC cards need a longer data read timeout than
	 * indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	END_FIXUP
};

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	int err;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);

		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define	mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");