block.c revision f5b4d71f72c5f08c2e1d0af68ef881f85537e7a1
1/* 2 * Block driver for media (i.e., flash cards) 3 * 4 * Copyright 2002 Hewlett-Packard Company 5 * Copyright 2005-2008 Pierre Ossman 6 * 7 * Use consistent with the GNU GPL is permitted, 8 * provided that this copyright notice is 9 * preserved in its entirety in all copies and derived works. 10 * 11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, 12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS 13 * FITNESS FOR ANY PARTICULAR PURPOSE. 14 * 15 * Many thanks to Alessandro Rubini and Jonathan Corbet! 16 * 17 * Author: Andrew Christian 18 * 28 May 2002 19 */ 20#include <linux/moduleparam.h> 21#include <linux/module.h> 22#include <linux/init.h> 23 24#include <linux/kernel.h> 25#include <linux/fs.h> 26#include <linux/slab.h> 27#include <linux/errno.h> 28#include <linux/hdreg.h> 29#include <linux/kdev_t.h> 30#include <linux/blkdev.h> 31#include <linux/mutex.h> 32#include <linux/scatterlist.h> 33#include <linux/string_helpers.h> 34#include <linux/delay.h> 35#include <linux/capability.h> 36#include <linux/compat.h> 37#include <linux/pm_runtime.h> 38 39#include <linux/mmc/ioctl.h> 40#include <linux/mmc/card.h> 41#include <linux/mmc/host.h> 42#include <linux/mmc/mmc.h> 43#include <linux/mmc/sd.h> 44 45#include <asm/uaccess.h> 46 47#include "queue.h" 48 49MODULE_ALIAS("mmc:block"); 50#ifdef MODULE_PARAM_PREFIX 51#undef MODULE_PARAM_PREFIX 52#endif 53#define MODULE_PARAM_PREFIX "mmcblk." 54 55#define INAND_CMD38_ARG_EXT_CSD 113 56#define INAND_CMD38_ARG_ERASE 0x00 57#define INAND_CMD38_ARG_TRIM 0x01 58#define INAND_CMD38_ARG_SECERASE 0x80 59#define INAND_CMD38_ARG_SECTRIM1 0x81 60#define INAND_CMD38_ARG_SECTRIM2 0x88 61#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ 62#define MMC_SANITIZE_REQ_TIMEOUT 240000 63#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 64 65#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ 66 (req->cmd_flags & REQ_META)) && \ 67 (rq_data_dir(req) == WRITE)) 68#define PACKED_CMD_VER 0x01 69#define PACKED_CMD_WR 0x02 70 71static DEFINE_MUTEX(block_mutex); 72 73/* 74 * The defaults come from config options but can be overriden by module 75 * or bootarg options. 76 */ 77static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; 78 79/* 80 * We've only got one major, so number of mmcblk devices is 81 * limited to 256 / number of minors per device. 82 */ 83static int max_devices; 84 85/* 256 minors, so at most 256 separate devices */ 86static DECLARE_BITMAP(dev_use, 256); 87static DECLARE_BITMAP(name_use, 256); 88 89/* 90 * There is one mmc_blk_data per slot. 91 */ 92struct mmc_blk_data { 93 spinlock_t lock; 94 struct gendisk *disk; 95 struct mmc_queue queue; 96 struct list_head part; 97 98 unsigned int flags; 99#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ 100#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ 101#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */ 102 103 unsigned int usage; 104 unsigned int read_only; 105 unsigned int part_type; 106 unsigned int name_idx; 107 unsigned int reset_done; 108#define MMC_BLK_READ BIT(0) 109#define MMC_BLK_WRITE BIT(1) 110#define MMC_BLK_DISCARD BIT(2) 111#define MMC_BLK_SECDISCARD BIT(3) 112 113 /* 114 * Only set in main mmc_blk_data associated 115 * with mmc_card with mmc_set_drvdata, and keeps 116 * track of the current selected device partition. 117 */ 118 unsigned int part_curr; 119 struct device_attribute force_ro; 120 struct device_attribute power_ro_lock; 121 int area_type; 122}; 123 124static DEFINE_MUTEX(open_lock); 125 126enum { 127 MMC_PACKED_NR_IDX = -1, 128 MMC_PACKED_NR_ZERO, 129 MMC_PACKED_NR_SINGLE, 130}; 131 132module_param(perdev_minors, int, 0444); 133MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); 134 135static inline int mmc_blk_part_switch(struct mmc_card *card, 136 struct mmc_blk_data *md); 137static int get_card_status(struct mmc_card *card, u32 *status, int retries); 138 139static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) 140{ 141 struct mmc_packed *packed = mqrq->packed; 142 143 BUG_ON(!packed); 144 145 mqrq->cmd_type = MMC_PACKED_NONE; 146 packed->nr_entries = MMC_PACKED_NR_ZERO; 147 packed->idx_failure = MMC_PACKED_NR_IDX; 148 packed->retries = 0; 149 packed->blocks = 0; 150} 151 152static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) 153{ 154 struct mmc_blk_data *md; 155 156 mutex_lock(&open_lock); 157 md = disk->private_data; 158 if (md && md->usage == 0) 159 md = NULL; 160 if (md) 161 md->usage++; 162 mutex_unlock(&open_lock); 163 164 return md; 165} 166 167static inline int mmc_get_devidx(struct gendisk *disk) 168{ 169 int devmaj = MAJOR(disk_devt(disk)); 170 int devidx = MINOR(disk_devt(disk)) / perdev_minors; 171 172 if (!devmaj) 173 devidx = disk->first_minor / perdev_minors; 174 return devidx; 175} 176 177static void mmc_blk_put(struct mmc_blk_data *md) 178{ 179 mutex_lock(&open_lock); 180 md->usage--; 181 if (md->usage == 0) { 182 int devidx = mmc_get_devidx(md->disk); 183 blk_cleanup_queue(md->queue.queue); 184 185 __clear_bit(devidx, dev_use); 186 187 put_disk(md->disk); 188 kfree(md); 189 } 190 mutex_unlock(&open_lock); 191} 192 193static ssize_t power_ro_lock_show(struct device *dev, 194 struct device_attribute *attr, char *buf) 195{ 196 int ret; 197 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 198 struct mmc_card *card = md->queue.card; 199 int locked = 0; 200 201 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) 202 locked = 2; 203 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) 204 locked = 1; 205 206 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); 207 208 return ret; 209} 210 211static ssize_t power_ro_lock_store(struct device *dev, 212 struct device_attribute *attr, const char *buf, size_t count) 213{ 214 int ret; 215 struct mmc_blk_data *md, *part_md; 216 struct mmc_card *card; 217 unsigned long set; 218 219 if (kstrtoul(buf, 0, &set)) 220 return -EINVAL; 221 222 if (set != 1) 223 return count; 224 225 md = mmc_blk_get(dev_to_disk(dev)); 226 card = md->queue.card; 227 228 mmc_get_card(card); 229 230 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 231 card->ext_csd.boot_ro_lock | 232 EXT_CSD_BOOT_WP_B_PWR_WP_EN, 233 card->ext_csd.part_time); 234 if (ret) 235 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); 236 else 237 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; 238 239 mmc_put_card(card); 240 241 if (!ret) { 242 pr_info("%s: Locking boot partition ro until next power on\n", 243 md->disk->disk_name); 244 set_disk_ro(md->disk, 1); 245 246 list_for_each_entry(part_md, &md->part, part) 247 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { 248 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); 249 set_disk_ro(part_md->disk, 1); 250 } 251 } 252 253 mmc_blk_put(md); 254 return count; 255} 256 257static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, 258 char *buf) 259{ 260 int ret; 261 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 262 263 ret = snprintf(buf, PAGE_SIZE, "%d", 264 get_disk_ro(dev_to_disk(dev)) ^ 265 md->read_only); 266 mmc_blk_put(md); 267 return ret; 268} 269 270static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, 271 const char *buf, size_t count) 272{ 273 int ret; 274 char *end; 275 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 276 unsigned long set = simple_strtoul(buf, &end, 0); 277 if (end == buf) { 278 ret = -EINVAL; 279 goto out; 280 } 281 282 set_disk_ro(dev_to_disk(dev), set || md->read_only); 283 ret = count; 284out: 285 mmc_blk_put(md); 286 return ret; 287} 288 289static int mmc_blk_open(struct block_device *bdev, fmode_t mode) 290{ 291 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); 292 int ret = -ENXIO; 293 294 mutex_lock(&block_mutex); 295 if (md) { 296 if (md->usage == 2) 297 check_disk_change(bdev); 298 ret = 0; 299 300 if ((mode & FMODE_WRITE) && md->read_only) { 301 mmc_blk_put(md); 302 ret = -EROFS; 303 } 304 } 305 mutex_unlock(&block_mutex); 306 307 return ret; 308} 309 310static void mmc_blk_release(struct gendisk *disk, fmode_t mode) 311{ 312 struct mmc_blk_data *md = disk->private_data; 313 314 mutex_lock(&block_mutex); 315 mmc_blk_put(md); 316 mutex_unlock(&block_mutex); 317} 318 319static int 320mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 321{ 322 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); 323 geo->heads = 4; 324 geo->sectors = 16; 325 return 0; 326} 327 328struct mmc_blk_ioc_data { 329 struct mmc_ioc_cmd ic; 330 unsigned char *buf; 331 u64 buf_bytes; 332}; 333 334static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( 335 struct mmc_ioc_cmd __user *user) 336{ 337 struct mmc_blk_ioc_data *idata; 338 int err; 339 340 idata = kzalloc(sizeof(*idata), GFP_KERNEL); 341 if (!idata) { 342 err = -ENOMEM; 343 goto out; 344 } 345 346 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { 347 err = -EFAULT; 348 goto idata_err; 349 } 350 351 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; 352 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { 353 err = -EOVERFLOW; 354 goto idata_err; 355 } 356 357 if (!idata->buf_bytes) 358 return idata; 359 360 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); 361 if (!idata->buf) { 362 err = -ENOMEM; 363 goto idata_err; 364 } 365 366 if (copy_from_user(idata->buf, (void __user *)(unsigned long) 367 idata->ic.data_ptr, idata->buf_bytes)) { 368 err = -EFAULT; 369 goto copy_err; 370 } 371 372 return idata; 373 374copy_err: 375 kfree(idata->buf); 376idata_err: 377 kfree(idata); 378out: 379 return ERR_PTR(err); 380} 381 382static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, 383 u32 retries_max) 384{ 385 int err; 386 u32 retry_count = 0; 387 388 if (!status || !retries_max) 389 return -EINVAL; 390 391 do { 392 err = get_card_status(card, status, 5); 393 if (err) 394 break; 395 396 if (!R1_STATUS(*status) && 397 (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) 398 break; /* RPMB programming operation complete */ 399 400 /* 401 * Rechedule to give the MMC device a chance to continue 402 * processing the previous command without being polled too 403 * frequently. 404 */ 405 usleep_range(1000, 5000); 406 } while (++retry_count < retries_max); 407 408 if (retry_count == retries_max) 409 err = -EPERM; 410 411 return err; 412} 413 414static int ioctl_do_sanitize(struct mmc_card *card) 415{ 416 int err; 417 418 if (!mmc_can_sanitize(card)) { 419 pr_warn("%s: %s - SANITIZE is not supported\n", 420 mmc_hostname(card->host), __func__); 421 err = -EOPNOTSUPP; 422 goto out; 423 } 424 425 pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", 426 mmc_hostname(card->host), __func__); 427 428 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 429 EXT_CSD_SANITIZE_START, 1, 430 MMC_SANITIZE_REQ_TIMEOUT); 431 432 if (err) 433 pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n", 434 mmc_hostname(card->host), __func__, err); 435 436 pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), 437 __func__); 438out: 439 return err; 440} 441 442static int mmc_blk_ioctl_cmd(struct block_device *bdev, 443 struct mmc_ioc_cmd __user *ic_ptr) 444{ 445 struct mmc_blk_ioc_data *idata; 446 struct mmc_blk_data *md; 447 struct mmc_card *card; 448 struct mmc_command cmd = {0}; 449 struct mmc_data data = {0}; 450 struct mmc_request mrq = {NULL}; 451 struct scatterlist sg; 452 int err; 453 int is_rpmb = false; 454 u32 status = 0; 455 456 /* 457 * The caller must have CAP_SYS_RAWIO, and must be calling this on the 458 * whole block device, not on a partition. This prevents overspray 459 * between sibling partitions. 460 */ 461 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) 462 return -EPERM; 463 464 idata = mmc_blk_ioctl_copy_from_user(ic_ptr); 465 if (IS_ERR(idata)) 466 return PTR_ERR(idata); 467 468 md = mmc_blk_get(bdev->bd_disk); 469 if (!md) { 470 err = -EINVAL; 471 goto cmd_err; 472 } 473 474 if (md->area_type & MMC_BLK_DATA_AREA_RPMB) 475 is_rpmb = true; 476 477 card = md->queue.card; 478 if (IS_ERR(card)) { 479 err = PTR_ERR(card); 480 goto cmd_done; 481 } 482 483 cmd.opcode = idata->ic.opcode; 484 cmd.arg = idata->ic.arg; 485 cmd.flags = idata->ic.flags; 486 487 if (idata->buf_bytes) { 488 data.sg = &sg; 489 data.sg_len = 1; 490 data.blksz = idata->ic.blksz; 491 data.blocks = idata->ic.blocks; 492 493 sg_init_one(data.sg, idata->buf, idata->buf_bytes); 494 495 if (idata->ic.write_flag) 496 data.flags = MMC_DATA_WRITE; 497 else 498 data.flags = MMC_DATA_READ; 499 500 /* data.flags must already be set before doing this. */ 501 mmc_set_data_timeout(&data, card); 502 503 /* Allow overriding the timeout_ns for empirical tuning. */ 504 if (idata->ic.data_timeout_ns) 505 data.timeout_ns = idata->ic.data_timeout_ns; 506 507 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { 508 /* 509 * Pretend this is a data transfer and rely on the 510 * host driver to compute timeout. When all host 511 * drivers support cmd.cmd_timeout for R1B, this 512 * can be changed to: 513 * 514 * mrq.data = NULL; 515 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; 516 */ 517 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; 518 } 519 520 mrq.data = &data; 521 } 522 523 mrq.cmd = &cmd; 524 525 mmc_get_card(card); 526 527 err = mmc_blk_part_switch(card, md); 528 if (err) 529 goto cmd_rel_host; 530 531 if (idata->ic.is_acmd) { 532 err = mmc_app_cmd(card->host, card); 533 if (err) 534 goto cmd_rel_host; 535 } 536 537 if (is_rpmb) { 538 err = mmc_set_blockcount(card, data.blocks, 539 idata->ic.write_flag & (1 << 31)); 540 if (err) 541 goto cmd_rel_host; 542 } 543 544 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && 545 (cmd.opcode == MMC_SWITCH)) { 546 err = ioctl_do_sanitize(card); 547 548 if (err) 549 pr_err("%s: ioctl_do_sanitize() failed. err = %d", 550 __func__, err); 551 552 goto cmd_rel_host; 553 } 554 555 mmc_wait_for_req(card->host, &mrq); 556 557 if (cmd.error) { 558 dev_err(mmc_dev(card->host), "%s: cmd error %d\n", 559 __func__, cmd.error); 560 err = cmd.error; 561 goto cmd_rel_host; 562 } 563 if (data.error) { 564 dev_err(mmc_dev(card->host), "%s: data error %d\n", 565 __func__, data.error); 566 err = data.error; 567 goto cmd_rel_host; 568 } 569 570 /* 571 * According to the SD specs, some commands require a delay after 572 * issuing the command. 573 */ 574 if (idata->ic.postsleep_min_us) 575 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); 576 577 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { 578 err = -EFAULT; 579 goto cmd_rel_host; 580 } 581 582 if (!idata->ic.write_flag) { 583 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, 584 idata->buf, idata->buf_bytes)) { 585 err = -EFAULT; 586 goto cmd_rel_host; 587 } 588 } 589 590 if (is_rpmb) { 591 /* 592 * Ensure RPMB command has completed by polling CMD13 593 * "Send Status". 594 */ 595 err = ioctl_rpmb_card_status_poll(card, &status, 5); 596 if (err) 597 dev_err(mmc_dev(card->host), 598 "%s: Card Status=0x%08X, error %d\n", 599 __func__, status, err); 600 } 601 602cmd_rel_host: 603 mmc_put_card(card); 604 605cmd_done: 606 mmc_blk_put(md); 607cmd_err: 608 kfree(idata->buf); 609 kfree(idata); 610 return err; 611} 612 613static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, 614 unsigned int cmd, unsigned long arg) 615{ 616 int ret = -EINVAL; 617 if (cmd == MMC_IOC_CMD) 618 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); 619 return ret; 620} 621 622#ifdef CONFIG_COMPAT 623static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, 624 unsigned int cmd, unsigned long arg) 625{ 626 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); 627} 628#endif 629 630static const struct block_device_operations mmc_bdops = { 631 .open = mmc_blk_open, 632 .release = mmc_blk_release, 633 .getgeo = mmc_blk_getgeo, 634 .owner = THIS_MODULE, 635 .ioctl = mmc_blk_ioctl, 636#ifdef CONFIG_COMPAT 637 .compat_ioctl = mmc_blk_compat_ioctl, 638#endif 639}; 640 641static inline int mmc_blk_part_switch(struct mmc_card *card, 642 struct mmc_blk_data *md) 643{ 644 int ret; 645 struct mmc_blk_data *main_md = mmc_get_drvdata(card); 646 647 if (main_md->part_curr == md->part_type) 648 return 0; 649 650 if (mmc_card_mmc(card)) { 651 u8 part_config = card->ext_csd.part_config; 652 653 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; 654 part_config |= md->part_type; 655 656 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 657 EXT_CSD_PART_CONFIG, part_config, 658 card->ext_csd.part_time); 659 if (ret) 660 return ret; 661 662 card->ext_csd.part_config = part_config; 663 } 664 665 main_md->part_curr = md->part_type; 666 return 0; 667} 668 669static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) 670{ 671 int err; 672 u32 result; 673 __be32 *blocks; 674 675 struct mmc_request mrq = {NULL}; 676 struct mmc_command cmd = {0}; 677 struct mmc_data data = {0}; 678 679 struct scatterlist sg; 680 681 cmd.opcode = MMC_APP_CMD; 682 cmd.arg = card->rca << 16; 683 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 684 685 err = mmc_wait_for_cmd(card->host, &cmd, 0); 686 if (err) 687 return (u32)-1; 688 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) 689 return (u32)-1; 690 691 memset(&cmd, 0, sizeof(struct mmc_command)); 692 693 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; 694 cmd.arg = 0; 695 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 696 697 data.blksz = 4; 698 data.blocks = 1; 699 data.flags = MMC_DATA_READ; 700 data.sg = &sg; 701 data.sg_len = 1; 702 mmc_set_data_timeout(&data, card); 703 704 mrq.cmd = &cmd; 705 mrq.data = &data; 706 707 blocks = kmalloc(4, GFP_KERNEL); 708 if (!blocks) 709 return (u32)-1; 710 711 sg_init_one(&sg, blocks, 4); 712 713 mmc_wait_for_req(card->host, &mrq); 714 715 result = ntohl(*blocks); 716 kfree(blocks); 717 718 if (cmd.error || data.error) 719 result = (u32)-1; 720 721 return result; 722} 723 724static int get_card_status(struct mmc_card *card, u32 *status, int retries) 725{ 726 struct mmc_command cmd = {0}; 727 int err; 728 729 cmd.opcode = MMC_SEND_STATUS; 730 if (!mmc_host_is_spi(card->host)) 731 cmd.arg = card->rca << 16; 732 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 733 err = mmc_wait_for_cmd(card->host, &cmd, retries); 734 if (err == 0) 735 *status = cmd.resp[0]; 736 return err; 737} 738 739static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, 740 bool hw_busy_detect, struct request *req, int *gen_err) 741{ 742 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); 743 int err = 0; 744 u32 status; 745 746 do { 747 err = get_card_status(card, &status, 5); 748 if (err) { 749 pr_err("%s: error %d requesting status\n", 750 req->rq_disk->disk_name, err); 751 return err; 752 } 753 754 if (status & R1_ERROR) { 755 pr_err("%s: %s: error sending status cmd, status %#x\n", 756 req->rq_disk->disk_name, __func__, status); 757 *gen_err = 1; 758 } 759 760 /* We may rely on the host hw to handle busy detection.*/ 761 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && 762 hw_busy_detect) 763 break; 764 765 /* 766 * Timeout if the device never becomes ready for data and never 767 * leaves the program state. 768 */ 769 if (time_after(jiffies, timeout)) { 770 pr_err("%s: Card stuck in programming state! %s %s\n", 771 mmc_hostname(card->host), 772 req->rq_disk->disk_name, __func__); 773 return -ETIMEDOUT; 774 } 775 776 /* 777 * Some cards mishandle the status bits, 778 * so make sure to check both the busy 779 * indication and the card state. 780 */ 781 } while (!(status & R1_READY_FOR_DATA) || 782 (R1_CURRENT_STATE(status) == R1_STATE_PRG)); 783 784 return err; 785} 786 787static int send_stop(struct mmc_card *card, unsigned int timeout_ms, 788 struct request *req, int *gen_err, u32 *stop_status) 789{ 790 struct mmc_host *host = card->host; 791 struct mmc_command cmd = {0}; 792 int err; 793 bool use_r1b_resp = rq_data_dir(req) == WRITE; 794 795 /* 796 * Normally we use R1B responses for WRITE, but in cases where the host 797 * has specified a max_busy_timeout we need to validate it. A failure 798 * means we need to prevent the host from doing hw busy detection, which 799 * is done by converting to a R1 response instead. 800 */ 801 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) 802 use_r1b_resp = false; 803 804 cmd.opcode = MMC_STOP_TRANSMISSION; 805 if (use_r1b_resp) { 806 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 807 cmd.busy_timeout = timeout_ms; 808 } else { 809 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 810 } 811 812 err = mmc_wait_for_cmd(host, &cmd, 5); 813 if (err) 814 return err; 815 816 *stop_status = cmd.resp[0]; 817 818 /* No need to check card status in case of READ. */ 819 if (rq_data_dir(req) == READ) 820 return 0; 821 822 if (!mmc_host_is_spi(host) && 823 (*stop_status & R1_ERROR)) { 824 pr_err("%s: %s: general error sending stop command, resp %#x\n", 825 req->rq_disk->disk_name, __func__, *stop_status); 826 *gen_err = 1; 827 } 828 829 return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err); 830} 831 832#define ERR_NOMEDIUM 3 833#define ERR_RETRY 2 834#define ERR_ABORT 1 835#define ERR_CONTINUE 0 836 837static int mmc_blk_cmd_error(struct request *req, const char *name, int error, 838 bool status_valid, u32 status) 839{ 840 switch (error) { 841 case -EILSEQ: 842 /* response crc error, retry the r/w cmd */ 843 pr_err("%s: %s sending %s command, card status %#x\n", 844 req->rq_disk->disk_name, "response CRC error", 845 name, status); 846 return ERR_RETRY; 847 848 case -ETIMEDOUT: 849 pr_err("%s: %s sending %s command, card status %#x\n", 850 req->rq_disk->disk_name, "timed out", name, status); 851 852 /* If the status cmd initially failed, retry the r/w cmd */ 853 if (!status_valid) 854 return ERR_RETRY; 855 856 /* 857 * If it was a r/w cmd crc error, or illegal command 858 * (eg, issued in wrong state) then retry - we should 859 * have corrected the state problem above. 860 */ 861 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) 862 return ERR_RETRY; 863 864 /* Otherwise abort the command */ 865 return ERR_ABORT; 866 867 default: 868 /* We don't understand the error code the driver gave us */ 869 pr_err("%s: unknown error %d sending read/write command, card status %#x\n", 870 req->rq_disk->disk_name, error, status); 871 return ERR_ABORT; 872 } 873} 874 875/* 876 * Initial r/w and stop cmd error recovery. 877 * We don't know whether the card received the r/w cmd or not, so try to 878 * restore things back to a sane state. Essentially, we do this as follows: 879 * - Obtain card status. If the first attempt to obtain card status fails, 880 * the status word will reflect the failed status cmd, not the failed 881 * r/w cmd. If we fail to obtain card status, it suggests we can no 882 * longer communicate with the card. 883 * - Check the card state. If the card received the cmd but there was a 884 * transient problem with the response, it might still be in a data transfer 885 * mode. Try to send it a stop command. If this fails, we can't recover. 886 * - If the r/w cmd failed due to a response CRC error, it was probably 887 * transient, so retry the cmd. 888 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. 889 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or 890 * illegal cmd, retry. 891 * Otherwise we don't understand what happened, so abort. 892 */ 893static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, 894 struct mmc_blk_request *brq, int *ecc_err, int *gen_err) 895{ 896 bool prev_cmd_status_valid = true; 897 u32 status, stop_status = 0; 898 int err, retry; 899 900 if (mmc_card_removed(card)) 901 return ERR_NOMEDIUM; 902 903 /* 904 * Try to get card status which indicates both the card state 905 * and why there was no response. If the first attempt fails, 906 * we can't be sure the returned status is for the r/w command. 907 */ 908 for (retry = 2; retry >= 0; retry--) { 909 err = get_card_status(card, &status, 0); 910 if (!err) 911 break; 912 913 prev_cmd_status_valid = false; 914 pr_err("%s: error %d sending status command, %sing\n", 915 req->rq_disk->disk_name, err, retry ? "retry" : "abort"); 916 } 917 918 /* We couldn't get a response from the card. Give up. */ 919 if (err) { 920 /* Check if the card is removed */ 921 if (mmc_detect_card_removed(card->host)) 922 return ERR_NOMEDIUM; 923 return ERR_ABORT; 924 } 925 926 /* Flag ECC errors */ 927 if ((status & R1_CARD_ECC_FAILED) || 928 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || 929 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) 930 *ecc_err = 1; 931 932 /* Flag General errors */ 933 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) 934 if ((status & R1_ERROR) || 935 (brq->stop.resp[0] & R1_ERROR)) { 936 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", 937 req->rq_disk->disk_name, __func__, 938 brq->stop.resp[0], status); 939 *gen_err = 1; 940 } 941 942 /* 943 * Check the current card state. If it is in some data transfer 944 * mode, tell it to stop (and hopefully transition back to TRAN.) 945 */ 946 if (R1_CURRENT_STATE(status) == R1_STATE_DATA || 947 R1_CURRENT_STATE(status) == R1_STATE_RCV) { 948 err = send_stop(card, 949 DIV_ROUND_UP(brq->data.timeout_ns, 1000000), 950 req, gen_err, &stop_status); 951 if (err) { 952 pr_err("%s: error %d sending stop command\n", 953 req->rq_disk->disk_name, err); 954 /* 955 * If the stop cmd also timed out, the card is probably 956 * not present, so abort. Other errors are bad news too. 957 */ 958 return ERR_ABORT; 959 } 960 961 if (stop_status & R1_CARD_ECC_FAILED) 962 *ecc_err = 1; 963 } 964 965 /* Check for set block count errors */ 966 if (brq->sbc.error) 967 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, 968 prev_cmd_status_valid, status); 969 970 /* Check for r/w command errors */ 971 if (brq->cmd.error) 972 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, 973 prev_cmd_status_valid, status); 974 975 /* Data errors */ 976 if (!brq->stop.error) 977 return ERR_CONTINUE; 978 979 /* Now for stop errors. These aren't fatal to the transfer. */ 980 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", 981 req->rq_disk->disk_name, brq->stop.error, 982 brq->cmd.resp[0], status); 983 984 /* 985 * Subsitute in our own stop status as this will give the error 986 * state which happened during the execution of the r/w command. 987 */ 988 if (stop_status) { 989 brq->stop.resp[0] = stop_status; 990 brq->stop.error = 0; 991 } 992 return ERR_CONTINUE; 993} 994 995static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, 996 int type) 997{ 998 int err; 999 1000 if (md->reset_done & type) 1001 return -EEXIST; 1002 1003 md->reset_done |= type; 1004 err = mmc_hw_reset(host); 1005 /* Ensure we switch back to the correct partition */ 1006 if (err != -EOPNOTSUPP) { 1007 struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); 1008 int part_err; 1009 1010 main_md->part_curr = main_md->part_type; 1011 part_err = mmc_blk_part_switch(host->card, md); 1012 if (part_err) { 1013 /* 1014 * We have failed to get back into the correct 1015 * partition, so we need to abort the whole request. 1016 */ 1017 return -ENODEV; 1018 } 1019 } 1020 return err; 1021} 1022 1023static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) 1024{ 1025 md->reset_done &= ~type; 1026} 1027 1028static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) 1029{ 1030 struct mmc_blk_data *md = mq->data; 1031 struct mmc_card *card = md->queue.card; 1032 unsigned int from, nr, arg; 1033 int err = 0, type = MMC_BLK_DISCARD; 1034 1035 if (!mmc_can_erase(card)) { 1036 err = -EOPNOTSUPP; 1037 goto out; 1038 } 1039 1040 from = blk_rq_pos(req); 1041 nr = blk_rq_sectors(req); 1042 1043 if (mmc_can_discard(card)) 1044 arg = MMC_DISCARD_ARG; 1045 else if (mmc_can_trim(card)) 1046 arg = MMC_TRIM_ARG; 1047 else 1048 arg = MMC_ERASE_ARG; 1049retry: 1050 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1051 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1052 INAND_CMD38_ARG_EXT_CSD, 1053 arg == MMC_TRIM_ARG ? 1054 INAND_CMD38_ARG_TRIM : 1055 INAND_CMD38_ARG_ERASE, 1056 0); 1057 if (err) 1058 goto out; 1059 } 1060 err = mmc_erase(card, from, nr, arg); 1061out: 1062 if (err == -EIO && !mmc_blk_reset(md, card->host, type)) 1063 goto retry; 1064 if (!err) 1065 mmc_blk_reset_success(md, type); 1066 blk_end_request(req, err, blk_rq_bytes(req)); 1067 1068 return err ? 0 : 1; 1069} 1070 1071static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, 1072 struct request *req) 1073{ 1074 struct mmc_blk_data *md = mq->data; 1075 struct mmc_card *card = md->queue.card; 1076 unsigned int from, nr, arg; 1077 int err = 0, type = MMC_BLK_SECDISCARD; 1078 1079 if (!(mmc_can_secure_erase_trim(card))) { 1080 err = -EOPNOTSUPP; 1081 goto out; 1082 } 1083 1084 from = blk_rq_pos(req); 1085 nr = blk_rq_sectors(req); 1086 1087 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) 1088 arg = MMC_SECURE_TRIM1_ARG; 1089 else 1090 arg = MMC_SECURE_ERASE_ARG; 1091 1092retry: 1093 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1094 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1095 INAND_CMD38_ARG_EXT_CSD, 1096 arg == MMC_SECURE_TRIM1_ARG ? 1097 INAND_CMD38_ARG_SECTRIM1 : 1098 INAND_CMD38_ARG_SECERASE, 1099 0); 1100 if (err) 1101 goto out_retry; 1102 } 1103 1104 err = mmc_erase(card, from, nr, arg); 1105 if (err == -EIO) 1106 goto out_retry; 1107 if (err) 1108 goto out; 1109 1110 if (arg == MMC_SECURE_TRIM1_ARG) { 1111 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1112 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1113 INAND_CMD38_ARG_EXT_CSD, 1114 INAND_CMD38_ARG_SECTRIM2, 1115 0); 1116 if (err) 1117 goto out_retry; 1118 } 1119 1120 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 1121 if (err == -EIO) 1122 goto out_retry; 1123 if (err) 1124 goto out; 1125 } 1126 1127out_retry: 1128 if (err && !mmc_blk_reset(md, card->host, type)) 1129 goto retry; 1130 if (!err) 1131 mmc_blk_reset_success(md, type); 1132out: 1133 blk_end_request(req, err, blk_rq_bytes(req)); 1134 1135 return err ? 0 : 1; 1136} 1137 1138static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) 1139{ 1140 struct mmc_blk_data *md = mq->data; 1141 struct mmc_card *card = md->queue.card; 1142 int ret = 0; 1143 1144 ret = mmc_flush_cache(card); 1145 if (ret) 1146 ret = -EIO; 1147 1148 blk_end_request_all(req, ret); 1149 1150 return ret ? 0 : 1; 1151} 1152 1153/* 1154 * Reformat current write as a reliable write, supporting 1155 * both legacy and the enhanced reliable write MMC cards. 1156 * In each transfer we'll handle only as much as a single 1157 * reliable write can handle, thus finish the request in 1158 * partial completions. 1159 */ 1160static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, 1161 struct mmc_card *card, 1162 struct request *req) 1163{ 1164 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { 1165 /* Legacy mode imposes restrictions on transfers. */ 1166 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) 1167 brq->data.blocks = 1; 1168 1169 if (brq->data.blocks > card->ext_csd.rel_sectors) 1170 brq->data.blocks = card->ext_csd.rel_sectors; 1171 else if (brq->data.blocks < card->ext_csd.rel_sectors) 1172 brq->data.blocks = 1; 1173 } 1174} 1175 1176#define CMD_ERRORS \ 1177 (R1_OUT_OF_RANGE | /* Command argument out of range */ \ 1178 R1_ADDRESS_ERROR | /* Misaligned address */ \ 1179 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ 1180 R1_WP_VIOLATION | /* Tried to write to protected block */ \ 1181 R1_CC_ERROR | /* Card controller error */ \ 1182 R1_ERROR) /* General/unknown error */ 1183 1184static int mmc_blk_err_check(struct mmc_card *card, 1185 struct mmc_async_req *areq) 1186{ 1187 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, 1188 mmc_active); 1189 struct mmc_blk_request *brq = &mq_mrq->brq; 1190 struct request *req = mq_mrq->req; 1191 int ecc_err = 0, gen_err = 0; 1192 1193 /* 1194 * sbc.error indicates a problem with the set block count 1195 * command. No data will have been transferred. 1196 * 1197 * cmd.error indicates a problem with the r/w command. No 1198 * data will have been transferred. 1199 * 1200 * stop.error indicates a problem with the stop command. Data 1201 * may have been transferred, or may still be transferring. 1202 */ 1203 if (brq->sbc.error || brq->cmd.error || brq->stop.error || 1204 brq->data.error) { 1205 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { 1206 case ERR_RETRY: 1207 return MMC_BLK_RETRY; 1208 case ERR_ABORT: 1209 return MMC_BLK_ABORT; 1210 case ERR_NOMEDIUM: 1211 return MMC_BLK_NOMEDIUM; 1212 case ERR_CONTINUE: 1213 break; 1214 } 1215 } 1216 1217 /* 1218 * Check for errors relating to the execution of the 1219 * initial command - such as address errors. No data 1220 * has been transferred. 1221 */ 1222 if (brq->cmd.resp[0] & CMD_ERRORS) { 1223 pr_err("%s: r/w command failed, status = %#x\n", 1224 req->rq_disk->disk_name, brq->cmd.resp[0]); 1225 return MMC_BLK_ABORT; 1226 } 1227 1228 /* 1229 * Everything else is either success, or a data error of some 1230 * kind. If it was a write, we may have transitioned to 1231 * program mode, which we have to wait for it to complete. 1232 */ 1233 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { 1234 int err; 1235 1236 /* Check stop command response */ 1237 if (brq->stop.resp[0] & R1_ERROR) { 1238 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", 1239 req->rq_disk->disk_name, __func__, 1240 brq->stop.resp[0]); 1241 gen_err = 1; 1242 } 1243 1244 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, 1245 &gen_err); 1246 if (err) 1247 return MMC_BLK_CMD_ERR; 1248 } 1249 1250 /* if general error occurs, retry the write operation. */ 1251 if (gen_err) { 1252 pr_warn("%s: retrying write for general error\n", 1253 req->rq_disk->disk_name); 1254 return MMC_BLK_RETRY; 1255 } 1256 1257 if (brq->data.error) { 1258 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", 1259 req->rq_disk->disk_name, brq->data.error, 1260 (unsigned)blk_rq_pos(req), 1261 (unsigned)blk_rq_sectors(req), 1262 brq->cmd.resp[0], brq->stop.resp[0]); 1263 1264 if (rq_data_dir(req) == READ) { 1265 if (ecc_err) 1266 return MMC_BLK_ECC_ERR; 1267 return MMC_BLK_DATA_ERR; 1268 } else { 1269 return MMC_BLK_CMD_ERR; 1270 } 1271 } 1272 1273 if (!brq->data.bytes_xfered) 1274 return MMC_BLK_RETRY; 1275 1276 if (mmc_packed_cmd(mq_mrq->cmd_type)) { 1277 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) 1278 return MMC_BLK_PARTIAL; 1279 else 1280 return MMC_BLK_SUCCESS; 1281 } 1282 1283 if (blk_rq_bytes(req) != brq->data.bytes_xfered) 1284 return MMC_BLK_PARTIAL; 1285 1286 return MMC_BLK_SUCCESS; 1287} 1288 1289static int mmc_blk_packed_err_check(struct mmc_card *card, 1290 struct mmc_async_req *areq) 1291{ 1292 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, 1293 mmc_active); 1294 struct request *req = mq_rq->req; 1295 struct mmc_packed *packed = mq_rq->packed; 1296 int err, check, status; 1297 u8 *ext_csd; 1298 1299 BUG_ON(!packed); 1300 1301 packed->retries--; 1302 check = mmc_blk_err_check(card, areq); 1303 err = get_card_status(card, &status, 0); 1304 if (err) { 1305 pr_err("%s: error %d sending status command\n", 1306 req->rq_disk->disk_name, err); 1307 return MMC_BLK_ABORT; 1308 } 1309 1310 if (status & R1_EXCEPTION_EVENT) { 1311 ext_csd = kzalloc(512, GFP_KERNEL); 1312 if (!ext_csd) { 1313 pr_err("%s: unable to allocate buffer for ext_csd\n", 1314 req->rq_disk->disk_name); 1315 return -ENOMEM; 1316 } 1317 1318 err = mmc_send_ext_csd(card, ext_csd); 1319 if (err) { 1320 pr_err("%s: error %d sending ext_csd\n", 1321 req->rq_disk->disk_name, err); 1322 check = MMC_BLK_ABORT; 1323 goto free; 1324 } 1325 1326 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & 1327 EXT_CSD_PACKED_FAILURE) && 1328 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & 1329 EXT_CSD_PACKED_GENERIC_ERROR)) { 1330 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & 1331 EXT_CSD_PACKED_INDEXED_ERROR) { 1332 packed->idx_failure = 1333 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; 1334 check = MMC_BLK_PARTIAL; 1335 } 1336 pr_err("%s: packed cmd failed, nr %u, sectors %u, " 1337 "failure index: %d\n", 1338 req->rq_disk->disk_name, packed->nr_entries, 1339 packed->blocks, packed->idx_failure); 1340 } 1341free: 1342 kfree(ext_csd); 1343 } 1344 1345 return check; 1346} 1347 1348static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, 1349 struct mmc_card *card, 1350 int disable_multi, 1351 struct mmc_queue *mq) 1352{ 1353 u32 readcmd, writecmd; 1354 struct mmc_blk_request *brq = &mqrq->brq; 1355 struct request *req = mqrq->req; 1356 struct mmc_blk_data *md = mq->data; 1357 bool do_data_tag; 1358 1359 /* 1360 * Reliable writes are used to implement Forced Unit Access and 1361 * REQ_META accesses, and are supported only on MMCs. 1362 * 1363 * XXX: this really needs a good explanation of why REQ_META 1364 * is treated special. 1365 */ 1366 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 1367 (req->cmd_flags & REQ_META)) && 1368 (rq_data_dir(req) == WRITE) && 1369 (md->flags & MMC_BLK_REL_WR); 1370 1371 memset(brq, 0, sizeof(struct mmc_blk_request)); 1372 brq->mrq.cmd = &brq->cmd; 1373 brq->mrq.data = &brq->data; 1374 1375 brq->cmd.arg = blk_rq_pos(req); 1376 if (!mmc_card_blockaddr(card)) 1377 brq->cmd.arg <<= 9; 1378 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 1379 brq->data.blksz = 512; 1380 brq->stop.opcode = MMC_STOP_TRANSMISSION; 1381 brq->stop.arg = 0; 1382 brq->data.blocks = blk_rq_sectors(req); 1383 1384 /* 1385 * The block layer doesn't support all sector count 1386 * restrictions, so we need to be prepared for too big 1387 * requests. 1388 */ 1389 if (brq->data.blocks > card->host->max_blk_count) 1390 brq->data.blocks = card->host->max_blk_count; 1391 1392 if (brq->data.blocks > 1) { 1393 /* 1394 * After a read error, we redo the request one sector 1395 * at a time in order to accurately determine which 1396 * sectors can be read successfully. 1397 */ 1398 if (disable_multi) 1399 brq->data.blocks = 1; 1400 1401 /* 1402 * Some controllers have HW issues while operating 1403 * in multiple I/O mode 1404 */ 1405 if (card->host->ops->multi_io_quirk) 1406 brq->data.blocks = card->host->ops->multi_io_quirk(card, 1407 (rq_data_dir(req) == READ) ? 1408 MMC_DATA_READ : MMC_DATA_WRITE, 1409 brq->data.blocks); 1410 } 1411 1412 if (brq->data.blocks > 1 || do_rel_wr) { 1413 /* SPI multiblock writes terminate using a special 1414 * token, not a STOP_TRANSMISSION request. 1415 */ 1416 if (!mmc_host_is_spi(card->host) || 1417 rq_data_dir(req) == READ) 1418 brq->mrq.stop = &brq->stop; 1419 readcmd = MMC_READ_MULTIPLE_BLOCK; 1420 writecmd = MMC_WRITE_MULTIPLE_BLOCK; 1421 } else { 1422 brq->mrq.stop = NULL; 1423 readcmd = MMC_READ_SINGLE_BLOCK; 1424 writecmd = MMC_WRITE_BLOCK; 1425 } 1426 if (rq_data_dir(req) == READ) { 1427 brq->cmd.opcode = readcmd; 1428 brq->data.flags |= MMC_DATA_READ; 1429 if (brq->mrq.stop) 1430 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | 1431 MMC_CMD_AC; 1432 } else { 1433 brq->cmd.opcode = writecmd; 1434 brq->data.flags |= MMC_DATA_WRITE; 1435 if (brq->mrq.stop) 1436 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | 1437 MMC_CMD_AC; 1438 } 1439 1440 if (do_rel_wr) 1441 mmc_apply_rel_rw(brq, card, req); 1442 1443 /* 1444 * Data tag is used only during writing meta data to speed 1445 * up write and any subsequent read of this meta data 1446 */ 1447 do_data_tag = (card->ext_csd.data_tag_unit_size) && 1448 (req->cmd_flags & REQ_META) && 1449 (rq_data_dir(req) == WRITE) && 1450 ((brq->data.blocks * brq->data.blksz) >= 1451 card->ext_csd.data_tag_unit_size); 1452 1453 /* 1454 * Pre-defined multi-block transfers are preferable to 1455 * open ended-ones (and necessary for reliable writes). 1456 * However, it is not sufficient to just send CMD23, 1457 * and avoid the final CMD12, as on an error condition 1458 * CMD12 (stop) needs to be sent anyway. This, coupled 1459 * with Auto-CMD23 enhancements provided by some 1460 * hosts, means that the complexity of dealing 1461 * with this is best left to the host. If CMD23 is 1462 * supported by card and host, we'll fill sbc in and let 1463 * the host deal with handling it correctly. This means 1464 * that for hosts that don't expose MMC_CAP_CMD23, no 1465 * change of behavior will be observed. 1466 * 1467 * N.B: Some MMC cards experience perf degradation. 1468 * We'll avoid using CMD23-bounded multiblock writes for 1469 * these, while retaining features like reliable writes. 1470 */ 1471 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && 1472 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || 1473 do_data_tag)) { 1474 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; 1475 brq->sbc.arg = brq->data.blocks | 1476 (do_rel_wr ? (1 << 31) : 0) | 1477 (do_data_tag ? (1 << 29) : 0); 1478 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 1479 brq->mrq.sbc = &brq->sbc; 1480 } 1481 1482 mmc_set_data_timeout(&brq->data, card); 1483 1484 brq->data.sg = mqrq->sg; 1485 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); 1486 1487 /* 1488 * Adjust the sg list so it is the same size as the 1489 * request. 1490 */ 1491 if (brq->data.blocks != blk_rq_sectors(req)) { 1492 int i, data_size = brq->data.blocks << 9; 1493 struct scatterlist *sg; 1494 1495 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { 1496 data_size -= sg->length; 1497 if (data_size <= 0) { 1498 sg->length += data_size; 1499 i++; 1500 break; 1501 } 1502 } 1503 brq->data.sg_len = i; 1504 } 1505 1506 mqrq->mmc_active.mrq = &brq->mrq; 1507 mqrq->mmc_active.err_check = mmc_blk_err_check; 1508 1509 mmc_queue_bounce_pre(mqrq); 1510} 1511 1512static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q, 1513 struct mmc_card *card) 1514{ 1515 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512; 1516 unsigned int max_seg_sz = queue_max_segment_size(q); 1517 unsigned int len, nr_segs = 0; 1518 1519 do { 1520 len = min(hdr_sz, max_seg_sz); 1521 hdr_sz -= len; 1522 nr_segs++; 1523 } while (hdr_sz); 1524 1525 return nr_segs; 1526} 1527 1528static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) 1529{ 1530 struct request_queue *q = mq->queue; 1531 struct mmc_card *card = mq->card; 1532 struct request *cur = req, *next = NULL; 1533 struct mmc_blk_data *md = mq->data; 1534 struct mmc_queue_req *mqrq = mq->mqrq_cur; 1535 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; 1536 unsigned int req_sectors = 0, phys_segments = 0; 1537 unsigned int max_blk_count, max_phys_segs; 1538 bool put_back = true; 1539 u8 max_packed_rw = 0; 1540 u8 reqs = 0; 1541 1542 if (!(md->flags & MMC_BLK_PACKED_CMD)) 1543 goto no_packed; 1544 1545 if ((rq_data_dir(cur) == WRITE) && 1546 mmc_host_packed_wr(card->host)) 1547 max_packed_rw = card->ext_csd.max_packed_writes; 1548 1549 if (max_packed_rw == 0) 1550 goto no_packed; 1551 1552 if (mmc_req_rel_wr(cur) && 1553 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) 1554 goto no_packed; 1555 1556 if (mmc_large_sector(card) && 1557 !IS_ALIGNED(blk_rq_sectors(cur), 8)) 1558 goto no_packed; 1559 1560 mmc_blk_clear_packed(mqrq); 1561 1562 max_blk_count = min(card->host->max_blk_count, 1563 card->host->max_req_size >> 9); 1564 if (unlikely(max_blk_count > 0xffff)) 1565 max_blk_count = 0xffff; 1566 1567 max_phys_segs = queue_max_segments(q); 1568 req_sectors += blk_rq_sectors(cur); 1569 phys_segments += cur->nr_phys_segments; 1570 1571 if (rq_data_dir(cur) == WRITE) { 1572 req_sectors += mmc_large_sector(card) ? 8 : 1; 1573 phys_segments += mmc_calc_packed_hdr_segs(q, card); 1574 } 1575 1576 do { 1577 if (reqs >= max_packed_rw - 1) { 1578 put_back = false; 1579 break; 1580 } 1581 1582 spin_lock_irq(q->queue_lock); 1583 next = blk_fetch_request(q); 1584 spin_unlock_irq(q->queue_lock); 1585 if (!next) { 1586 put_back = false; 1587 break; 1588 } 1589 1590 if (mmc_large_sector(card) && 1591 !IS_ALIGNED(blk_rq_sectors(next), 8)) 1592 break; 1593 1594 if (next->cmd_flags & REQ_DISCARD || 1595 next->cmd_flags & REQ_FLUSH) 1596 break; 1597 1598 if (rq_data_dir(cur) != rq_data_dir(next)) 1599 break; 1600 1601 if (mmc_req_rel_wr(next) && 1602 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) 1603 break; 1604 1605 req_sectors += blk_rq_sectors(next); 1606 if (req_sectors > max_blk_count) 1607 break; 1608 1609 phys_segments += next->nr_phys_segments; 1610 if (phys_segments > max_phys_segs) 1611 break; 1612 1613 list_add_tail(&next->queuelist, &mqrq->packed->list); 1614 cur = next; 1615 reqs++; 1616 } while (1); 1617 1618 if (put_back) { 1619 spin_lock_irq(q->queue_lock); 1620 blk_requeue_request(q, next); 1621 spin_unlock_irq(q->queue_lock); 1622 } 1623 1624 if (reqs > 0) { 1625 list_add(&req->queuelist, &mqrq->packed->list); 1626 mqrq->packed->nr_entries = ++reqs; 1627 mqrq->packed->retries = reqs; 1628 return reqs; 1629 } 1630 1631no_packed: 1632 mqrq->cmd_type = MMC_PACKED_NONE; 1633 return 0; 1634} 1635 1636static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, 1637 struct mmc_card *card, 1638 struct mmc_queue *mq) 1639{ 1640 struct mmc_blk_request *brq = &mqrq->brq; 1641 struct request *req = mqrq->req; 1642 struct request *prq; 1643 struct mmc_blk_data *md = mq->data; 1644 struct mmc_packed *packed = mqrq->packed; 1645 bool do_rel_wr, do_data_tag; 1646 u32 *packed_cmd_hdr; 1647 u8 hdr_blocks; 1648 u8 i = 1; 1649 1650 BUG_ON(!packed); 1651 1652 mqrq->cmd_type = MMC_PACKED_WRITE; 1653 packed->blocks = 0; 1654 packed->idx_failure = MMC_PACKED_NR_IDX; 1655 1656 packed_cmd_hdr = packed->cmd_hdr; 1657 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); 1658 packed_cmd_hdr[0] = (packed->nr_entries << 16) | 1659 (PACKED_CMD_WR << 8) | PACKED_CMD_VER; 1660 hdr_blocks = mmc_large_sector(card) ? 8 : 1; 1661 1662 /* 1663 * Argument for each entry of packed group 1664 */ 1665 list_for_each_entry(prq, &packed->list, queuelist) { 1666 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); 1667 do_data_tag = (card->ext_csd.data_tag_unit_size) && 1668 (prq->cmd_flags & REQ_META) && 1669 (rq_data_dir(prq) == WRITE) && 1670 ((brq->data.blocks * brq->data.blksz) >= 1671 card->ext_csd.data_tag_unit_size); 1672 /* Argument of CMD23 */ 1673 packed_cmd_hdr[(i * 2)] = 1674 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | 1675 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | 1676 blk_rq_sectors(prq); 1677 /* Argument of CMD18 or CMD25 */ 1678 packed_cmd_hdr[((i * 2)) + 1] = 1679 mmc_card_blockaddr(card) ? 1680 blk_rq_pos(prq) : blk_rq_pos(prq) << 9; 1681 packed->blocks += blk_rq_sectors(prq); 1682 i++; 1683 } 1684 1685 memset(brq, 0, sizeof(struct mmc_blk_request)); 1686 brq->mrq.cmd = &brq->cmd; 1687 brq->mrq.data = &brq->data; 1688 brq->mrq.sbc = &brq->sbc; 1689 brq->mrq.stop = &brq->stop; 1690 1691 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; 1692 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); 1693 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 1694 1695 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; 1696 brq->cmd.arg = blk_rq_pos(req); 1697 if (!mmc_card_blockaddr(card)) 1698 brq->cmd.arg <<= 9; 1699 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 1700 1701 brq->data.blksz = 512; 1702 brq->data.blocks = packed->blocks + hdr_blocks; 1703 brq->data.flags |= MMC_DATA_WRITE; 1704 1705 brq->stop.opcode = MMC_STOP_TRANSMISSION; 1706 brq->stop.arg = 0; 1707 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1708 1709 mmc_set_data_timeout(&brq->data, card); 1710 1711 brq->data.sg = mqrq->sg; 1712 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); 1713 1714 mqrq->mmc_active.mrq = &brq->mrq; 1715 mqrq->mmc_active.err_check = mmc_blk_packed_err_check; 1716 1717 mmc_queue_bounce_pre(mqrq); 1718} 1719 1720static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, 1721 struct mmc_blk_request *brq, struct request *req, 1722 int ret) 1723{ 1724 struct mmc_queue_req *mq_rq; 1725 mq_rq = container_of(brq, struct mmc_queue_req, brq); 1726 1727 /* 1728 * If this is an SD card and we're writing, we can first 1729 * mark the known good sectors as ok. 1730 * 1731 * If the card is not SD, we can still ok written sectors 1732 * as reported by the controller (which might be less than 1733 * the real number of written sectors, but never more). 1734 */ 1735 if (mmc_card_sd(card)) { 1736 u32 blocks; 1737 1738 blocks = mmc_sd_num_wr_blocks(card); 1739 if (blocks != (u32)-1) { 1740 ret = blk_end_request(req, 0, blocks << 9); 1741 } 1742 } else { 1743 if (!mmc_packed_cmd(mq_rq->cmd_type)) 1744 ret = blk_end_request(req, 0, brq->data.bytes_xfered); 1745 } 1746 return ret; 1747} 1748 1749static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) 1750{ 1751 struct request *prq; 1752 struct mmc_packed *packed = mq_rq->packed; 1753 int idx = packed->idx_failure, i = 0; 1754 int ret = 0; 1755 1756 BUG_ON(!packed); 1757 1758 while (!list_empty(&packed->list)) { 1759 prq = list_entry_rq(packed->list.next); 1760 if (idx == i) { 1761 /* retry from error index */ 1762 packed->nr_entries -= idx; 1763 mq_rq->req = prq; 1764 ret = 1; 1765 1766 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) { 1767 list_del_init(&prq->queuelist); 1768 mmc_blk_clear_packed(mq_rq); 1769 } 1770 return ret; 1771 } 1772 list_del_init(&prq->queuelist); 1773 blk_end_request(prq, 0, blk_rq_bytes(prq)); 1774 i++; 1775 } 1776 1777 mmc_blk_clear_packed(mq_rq); 1778 return ret; 1779} 1780 1781static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) 1782{ 1783 struct request *prq; 1784 struct mmc_packed *packed = mq_rq->packed; 1785 1786 BUG_ON(!packed); 1787 1788 while (!list_empty(&packed->list)) { 1789 prq = list_entry_rq(packed->list.next); 1790 list_del_init(&prq->queuelist); 1791 blk_end_request(prq, -EIO, blk_rq_bytes(prq)); 1792 } 1793 1794 mmc_blk_clear_packed(mq_rq); 1795} 1796 1797static void mmc_blk_revert_packed_req(struct mmc_queue *mq, 1798 struct mmc_queue_req *mq_rq) 1799{ 1800 struct request *prq; 1801 struct request_queue *q = mq->queue; 1802 struct mmc_packed *packed = mq_rq->packed; 1803 1804 BUG_ON(!packed); 1805 1806 while (!list_empty(&packed->list)) { 1807 prq = list_entry_rq(packed->list.prev); 1808 if (prq->queuelist.prev != &packed->list) { 1809 list_del_init(&prq->queuelist); 1810 spin_lock_irq(q->queue_lock); 1811 blk_requeue_request(mq->queue, prq); 1812 spin_unlock_irq(q->queue_lock); 1813 } else { 1814 list_del_init(&prq->queuelist); 1815 } 1816 } 1817 1818 mmc_blk_clear_packed(mq_rq); 1819} 1820 1821static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) 1822{ 1823 struct mmc_blk_data *md = mq->data; 1824 struct mmc_card *card = md->queue.card; 1825 struct mmc_blk_request *brq = &mq->mqrq_cur->brq; 1826 int ret = 1, disable_multi = 0, retry = 0, type; 1827 enum mmc_blk_status status; 1828 struct mmc_queue_req *mq_rq; 1829 struct request *req = rqc; 1830 struct mmc_async_req *areq; 1831 const u8 packed_nr = 2; 1832 u8 reqs = 0; 1833 1834 if (!rqc && !mq->mqrq_prev->req) 1835 return 0; 1836 1837 if (rqc) 1838 reqs = mmc_blk_prep_packed_list(mq, rqc); 1839 1840 do { 1841 if (rqc) { 1842 /* 1843 * When 4KB native sector is enabled, only 8 blocks 1844 * multiple read or write is allowed 1845 */ 1846 if ((brq->data.blocks & 0x07) && 1847 (card->ext_csd.data_sector_size == 4096)) { 1848 pr_err("%s: Transfer size is not 4KB sector size aligned\n", 1849 req->rq_disk->disk_name); 1850 mq_rq = mq->mqrq_cur; 1851 goto cmd_abort; 1852 } 1853 1854 if (reqs >= packed_nr) 1855 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, 1856 card, mq); 1857 else 1858 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); 1859 areq = &mq->mqrq_cur->mmc_active; 1860 } else 1861 areq = NULL; 1862 areq = mmc_start_req(card->host, areq, (int *) &status); 1863 if (!areq) { 1864 if (status == MMC_BLK_NEW_REQUEST) 1865 mq->flags |= MMC_QUEUE_NEW_REQUEST; 1866 return 0; 1867 } 1868 1869 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); 1870 brq = &mq_rq->brq; 1871 req = mq_rq->req; 1872 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 1873 mmc_queue_bounce_post(mq_rq); 1874 1875 switch (status) { 1876 case MMC_BLK_SUCCESS: 1877 case MMC_BLK_PARTIAL: 1878 /* 1879 * A block was successfully transferred. 1880 */ 1881 mmc_blk_reset_success(md, type); 1882 1883 if (mmc_packed_cmd(mq_rq->cmd_type)) { 1884 ret = mmc_blk_end_packed_req(mq_rq); 1885 break; 1886 } else { 1887 ret = blk_end_request(req, 0, 1888 brq->data.bytes_xfered); 1889 } 1890 1891 /* 1892 * If the blk_end_request function returns non-zero even 1893 * though all data has been transferred and no errors 1894 * were returned by the host controller, it's a bug. 1895 */ 1896 if (status == MMC_BLK_SUCCESS && ret) { 1897 pr_err("%s BUG rq_tot %d d_xfer %d\n", 1898 __func__, blk_rq_bytes(req), 1899 brq->data.bytes_xfered); 1900 rqc = NULL; 1901 goto cmd_abort; 1902 } 1903 break; 1904 case MMC_BLK_CMD_ERR: 1905 ret = mmc_blk_cmd_err(md, card, brq, req, ret); 1906 if (!mmc_blk_reset(md, card->host, type)) 1907 break; 1908 goto cmd_abort; 1909 case MMC_BLK_RETRY: 1910 if (retry++ < 5) 1911 break; 1912 /* Fall through */ 1913 case MMC_BLK_ABORT: 1914 if (!mmc_blk_reset(md, card->host, type)) 1915 break; 1916 goto cmd_abort; 1917 case MMC_BLK_DATA_ERR: { 1918 int err; 1919 1920 err = mmc_blk_reset(md, card->host, type); 1921 if (!err) 1922 break; 1923 if (err == -ENODEV || 1924 mmc_packed_cmd(mq_rq->cmd_type)) 1925 goto cmd_abort; 1926 /* Fall through */ 1927 } 1928 case MMC_BLK_ECC_ERR: 1929 if (brq->data.blocks > 1) { 1930 /* Redo read one sector at a time */ 1931 pr_warning("%s: retrying using single block read\n", 1932 req->rq_disk->disk_name); 1933 disable_multi = 1; 1934 break; 1935 } 1936 /* 1937 * After an error, we redo I/O one sector at a 1938 * time, so we only reach here after trying to 1939 * read a single sector. 1940 */ 1941 ret = blk_end_request(req, -EIO, 1942 brq->data.blksz); 1943 if (!ret) 1944 goto start_new_req; 1945 break; 1946 case MMC_BLK_NOMEDIUM: 1947 goto cmd_abort; 1948 default: 1949 pr_err("%s: Unhandled return value (%d)", 1950 req->rq_disk->disk_name, status); 1951 goto cmd_abort; 1952 } 1953 1954 if (ret) { 1955 if (mmc_packed_cmd(mq_rq->cmd_type)) { 1956 if (!mq_rq->packed->retries) 1957 goto cmd_abort; 1958 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); 1959 mmc_start_req(card->host, 1960 &mq_rq->mmc_active, NULL); 1961 } else { 1962 1963 /* 1964 * In case of a incomplete request 1965 * prepare it again and resend. 1966 */ 1967 mmc_blk_rw_rq_prep(mq_rq, card, 1968 disable_multi, mq); 1969 mmc_start_req(card->host, 1970 &mq_rq->mmc_active, NULL); 1971 } 1972 } 1973 } while (ret); 1974 1975 return 1; 1976 1977 cmd_abort: 1978 if (mmc_packed_cmd(mq_rq->cmd_type)) { 1979 mmc_blk_abort_packed_req(mq_rq); 1980 } else { 1981 if (mmc_card_removed(card)) 1982 req->cmd_flags |= REQ_QUIET; 1983 while (ret) 1984 ret = blk_end_request(req, -EIO, 1985 blk_rq_cur_bytes(req)); 1986 } 1987 1988 start_new_req: 1989 if (rqc) { 1990 if (mmc_card_removed(card)) { 1991 rqc->cmd_flags |= REQ_QUIET; 1992 blk_end_request_all(rqc, -EIO); 1993 } else { 1994 /* 1995 * If current request is packed, it needs to put back. 1996 */ 1997 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type)) 1998 mmc_blk_revert_packed_req(mq, mq->mqrq_cur); 1999 2000 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); 2001 mmc_start_req(card->host, 2002 &mq->mqrq_cur->mmc_active, NULL); 2003 } 2004 } 2005 2006 return 0; 2007} 2008 2009static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 2010{ 2011 int ret; 2012 struct mmc_blk_data *md = mq->data; 2013 struct mmc_card *card = md->queue.card; 2014 struct mmc_host *host = card->host; 2015 unsigned long flags; 2016 unsigned int cmd_flags = req ? req->cmd_flags : 0; 2017 2018 if (req && !mq->mqrq_prev->req) 2019 /* claim host only for the first request */ 2020 mmc_get_card(card); 2021 2022 ret = mmc_blk_part_switch(card, md); 2023 if (ret) { 2024 if (req) { 2025 blk_end_request_all(req, -EIO); 2026 } 2027 ret = 0; 2028 goto out; 2029 } 2030 2031 mq->flags &= ~MMC_QUEUE_NEW_REQUEST; 2032 if (cmd_flags & REQ_DISCARD) { 2033 /* complete ongoing async transfer before issuing discard */ 2034 if (card->host->areq) 2035 mmc_blk_issue_rw_rq(mq, NULL); 2036 if (req->cmd_flags & REQ_SECURE) 2037 ret = mmc_blk_issue_secdiscard_rq(mq, req); 2038 else 2039 ret = mmc_blk_issue_discard_rq(mq, req); 2040 } else if (cmd_flags & REQ_FLUSH) { 2041 /* complete ongoing async transfer before issuing flush */ 2042 if (card->host->areq) 2043 mmc_blk_issue_rw_rq(mq, NULL); 2044 ret = mmc_blk_issue_flush(mq, req); 2045 } else { 2046 if (!req && host->areq) { 2047 spin_lock_irqsave(&host->context_info.lock, flags); 2048 host->context_info.is_waiting_last_req = true; 2049 spin_unlock_irqrestore(&host->context_info.lock, flags); 2050 } 2051 ret = mmc_blk_issue_rw_rq(mq, req); 2052 } 2053 2054out: 2055 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || 2056 (cmd_flags & MMC_REQ_SPECIAL_MASK)) 2057 /* 2058 * Release host when there are no more requests 2059 * and after special request(discard, flush) is done. 2060 * In case sepecial request, there is no reentry to 2061 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'. 2062 */ 2063 mmc_put_card(card); 2064 return ret; 2065} 2066 2067static inline int mmc_blk_readonly(struct mmc_card *card) 2068{ 2069 return mmc_card_readonly(card) || 2070 !(card->csd.cmdclass & CCC_BLOCK_WRITE); 2071} 2072 2073static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, 2074 struct device *parent, 2075 sector_t size, 2076 bool default_ro, 2077 const char *subname, 2078 int area_type) 2079{ 2080 struct mmc_blk_data *md; 2081 int devidx, ret; 2082 2083 devidx = find_first_zero_bit(dev_use, max_devices); 2084 if (devidx >= max_devices) 2085 return ERR_PTR(-ENOSPC); 2086 __set_bit(devidx, dev_use); 2087 2088 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); 2089 if (!md) { 2090 ret = -ENOMEM; 2091 goto out; 2092 } 2093 2094 /* 2095 * !subname implies we are creating main mmc_blk_data that will be 2096 * associated with mmc_card with mmc_set_drvdata. Due to device 2097 * partitions, devidx will not coincide with a per-physical card 2098 * index anymore so we keep track of a name index. 2099 */ 2100 if (!subname) { 2101 md->name_idx = find_first_zero_bit(name_use, max_devices); 2102 __set_bit(md->name_idx, name_use); 2103 } else 2104 md->name_idx = ((struct mmc_blk_data *) 2105 dev_to_disk(parent)->private_data)->name_idx; 2106 2107 md->area_type = area_type; 2108 2109 /* 2110 * Set the read-only status based on the supported commands 2111 * and the write protect switch. 2112 */ 2113 md->read_only = mmc_blk_readonly(card); 2114 2115 md->disk = alloc_disk(perdev_minors); 2116 if (md->disk == NULL) { 2117 ret = -ENOMEM; 2118 goto err_kfree; 2119 } 2120 2121 spin_lock_init(&md->lock); 2122 INIT_LIST_HEAD(&md->part); 2123 md->usage = 1; 2124 2125 ret = mmc_init_queue(&md->queue, card, &md->lock, subname); 2126 if (ret) 2127 goto err_putdisk; 2128 2129 md->queue.issue_fn = mmc_blk_issue_rq; 2130 md->queue.data = md; 2131 2132 md->disk->major = MMC_BLOCK_MAJOR; 2133 md->disk->first_minor = devidx * perdev_minors; 2134 md->disk->fops = &mmc_bdops; 2135 md->disk->private_data = md; 2136 md->disk->queue = md->queue.queue; 2137 md->disk->driverfs_dev = parent; 2138 set_disk_ro(md->disk, md->read_only || default_ro); 2139 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) 2140 md->disk->flags |= GENHD_FL_NO_PART_SCAN; 2141 2142 /* 2143 * As discussed on lkml, GENHD_FL_REMOVABLE should: 2144 * 2145 * - be set for removable media with permanent block devices 2146 * - be unset for removable block devices with permanent media 2147 * 2148 * Since MMC block devices clearly fall under the second 2149 * case, we do not set GENHD_FL_REMOVABLE. Userspace 2150 * should use the block device creation/destruction hotplug 2151 * messages to tell when the card is present. 2152 */ 2153 2154 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 2155 "mmcblk%d%s", md->name_idx, subname ? subname : ""); 2156 2157 if (mmc_card_mmc(card)) 2158 blk_queue_logical_block_size(md->queue.queue, 2159 card->ext_csd.data_sector_size); 2160 else 2161 blk_queue_logical_block_size(md->queue.queue, 512); 2162 2163 set_capacity(md->disk, size); 2164 2165 if (mmc_host_cmd23(card->host)) { 2166 if (mmc_card_mmc(card) || 2167 (mmc_card_sd(card) && 2168 card->scr.cmds & SD_SCR_CMD23_SUPPORT)) 2169 md->flags |= MMC_BLK_CMD23; 2170 } 2171 2172 if (mmc_card_mmc(card) && 2173 md->flags & MMC_BLK_CMD23 && 2174 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || 2175 card->ext_csd.rel_sectors)) { 2176 md->flags |= MMC_BLK_REL_WR; 2177 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); 2178 } 2179 2180 if (mmc_card_mmc(card) && 2181 (area_type == MMC_BLK_DATA_AREA_MAIN) && 2182 (md->flags & MMC_BLK_CMD23) && 2183 card->ext_csd.packed_event_en) { 2184 if (!mmc_packed_init(&md->queue, card)) 2185 md->flags |= MMC_BLK_PACKED_CMD; 2186 } 2187 2188 return md; 2189 2190 err_putdisk: 2191 put_disk(md->disk); 2192 err_kfree: 2193 kfree(md); 2194 out: 2195 return ERR_PTR(ret); 2196} 2197 2198static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) 2199{ 2200 sector_t size; 2201 struct mmc_blk_data *md; 2202 2203 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 2204 /* 2205 * The EXT_CSD sector count is in number or 512 byte 2206 * sectors. 2207 */ 2208 size = card->ext_csd.sectors; 2209 } else { 2210 /* 2211 * The CSD capacity field is in units of read_blkbits. 2212 * set_capacity takes units of 512 bytes. 2213 */ 2214 size = card->csd.capacity << (card->csd.read_blkbits - 9); 2215 } 2216 2217 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, 2218 MMC_BLK_DATA_AREA_MAIN); 2219 return md; 2220} 2221 2222static int mmc_blk_alloc_part(struct mmc_card *card, 2223 struct mmc_blk_data *md, 2224 unsigned int part_type, 2225 sector_t size, 2226 bool default_ro, 2227 const char *subname, 2228 int area_type) 2229{ 2230 char cap_str[10]; 2231 struct mmc_blk_data *part_md; 2232 2233 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, 2234 subname, area_type); 2235 if (IS_ERR(part_md)) 2236 return PTR_ERR(part_md); 2237 part_md->part_type = part_type; 2238 list_add(&part_md->part, &md->part); 2239 2240 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, 2241 cap_str, sizeof(cap_str)); 2242 pr_info("%s: %s %s partition %u %s\n", 2243 part_md->disk->disk_name, mmc_card_id(card), 2244 mmc_card_name(card), part_md->part_type, cap_str); 2245 return 0; 2246} 2247 2248/* MMC Physical partitions consist of two boot partitions and 2249 * up to four general purpose partitions. 2250 * For each partition enabled in EXT_CSD a block device will be allocatedi 2251 * to provide access to the partition. 2252 */ 2253 2254static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) 2255{ 2256 int idx, ret = 0; 2257 2258 if (!mmc_card_mmc(card)) 2259 return 0; 2260 2261 for (idx = 0; idx < card->nr_parts; idx++) { 2262 if (card->part[idx].size) { 2263 ret = mmc_blk_alloc_part(card, md, 2264 card->part[idx].part_cfg, 2265 card->part[idx].size >> 9, 2266 card->part[idx].force_ro, 2267 card->part[idx].name, 2268 card->part[idx].area_type); 2269 if (ret) 2270 return ret; 2271 } 2272 } 2273 2274 return ret; 2275} 2276 2277static void mmc_blk_remove_req(struct mmc_blk_data *md) 2278{ 2279 struct mmc_card *card; 2280 2281 if (md) { 2282 /* 2283 * Flush remaining requests and free queues. It 2284 * is freeing the queue that stops new requests 2285 * from being accepted. 2286 */ 2287 card = md->queue.card; 2288 mmc_cleanup_queue(&md->queue); 2289 if (md->flags & MMC_BLK_PACKED_CMD) 2290 mmc_packed_clean(&md->queue); 2291 if (md->disk->flags & GENHD_FL_UP) { 2292 device_remove_file(disk_to_dev(md->disk), &md->force_ro); 2293 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && 2294 card->ext_csd.boot_ro_lockable) 2295 device_remove_file(disk_to_dev(md->disk), 2296 &md->power_ro_lock); 2297 2298 del_gendisk(md->disk); 2299 } 2300 mmc_blk_put(md); 2301 } 2302} 2303 2304static void mmc_blk_remove_parts(struct mmc_card *card, 2305 struct mmc_blk_data *md) 2306{ 2307 struct list_head *pos, *q; 2308 struct mmc_blk_data *part_md; 2309 2310 __clear_bit(md->name_idx, name_use); 2311 list_for_each_safe(pos, q, &md->part) { 2312 part_md = list_entry(pos, struct mmc_blk_data, part); 2313 list_del(pos); 2314 mmc_blk_remove_req(part_md); 2315 } 2316} 2317 2318static int mmc_add_disk(struct mmc_blk_data *md) 2319{ 2320 int ret; 2321 struct mmc_card *card = md->queue.card; 2322 2323 add_disk(md->disk); 2324 md->force_ro.show = force_ro_show; 2325 md->force_ro.store = force_ro_store; 2326 sysfs_attr_init(&md->force_ro.attr); 2327 md->force_ro.attr.name = "force_ro"; 2328 md->force_ro.attr.mode = S_IRUGO | S_IWUSR; 2329 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); 2330 if (ret) 2331 goto force_ro_fail; 2332 2333 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && 2334 card->ext_csd.boot_ro_lockable) { 2335 umode_t mode; 2336 2337 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) 2338 mode = S_IRUGO; 2339 else 2340 mode = S_IRUGO | S_IWUSR; 2341 2342 md->power_ro_lock.show = power_ro_lock_show; 2343 md->power_ro_lock.store = power_ro_lock_store; 2344 sysfs_attr_init(&md->power_ro_lock.attr); 2345 md->power_ro_lock.attr.mode = mode; 2346 md->power_ro_lock.attr.name = 2347 "ro_lock_until_next_power_on"; 2348 ret = device_create_file(disk_to_dev(md->disk), 2349 &md->power_ro_lock); 2350 if (ret) 2351 goto power_ro_lock_fail; 2352 } 2353 return ret; 2354 2355power_ro_lock_fail: 2356 device_remove_file(disk_to_dev(md->disk), &md->force_ro); 2357force_ro_fail: 2358 del_gendisk(md->disk); 2359 2360 return ret; 2361} 2362 2363#define CID_MANFID_SANDISK 0x2 2364#define CID_MANFID_TOSHIBA 0x11 2365#define CID_MANFID_MICRON 0x13 2366#define CID_MANFID_SAMSUNG 0x15 2367 2368static const struct mmc_fixup blk_fixups[] = 2369{ 2370 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, 2371 MMC_QUIRK_INAND_CMD38), 2372 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, 2373 MMC_QUIRK_INAND_CMD38), 2374 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, 2375 MMC_QUIRK_INAND_CMD38), 2376 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, 2377 MMC_QUIRK_INAND_CMD38), 2378 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, 2379 MMC_QUIRK_INAND_CMD38), 2380 2381 /* 2382 * Some MMC cards experience performance degradation with CMD23 2383 * instead of CMD12-bounded multiblock transfers. For now we'll 2384 * black list what's bad... 2385 * - Certain Toshiba cards. 2386 * 2387 * N.B. This doesn't affect SD cards. 2388 */ 2389 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 2390 MMC_QUIRK_BLK_NO_CMD23), 2391 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 2392 MMC_QUIRK_BLK_NO_CMD23), 2393 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 2394 MMC_QUIRK_BLK_NO_CMD23), 2395 2396 /* 2397 * Some Micron MMC cards needs longer data read timeout than 2398 * indicated in CSD. 2399 */ 2400 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, 2401 MMC_QUIRK_LONG_READ_TIME), 2402 2403 /* 2404 * On these Samsung MoviNAND parts, performing secure erase or 2405 * secure trim can result in unrecoverable corruption due to a 2406 * firmware bug. 2407 */ 2408 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2409 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2410 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2411 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2412 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2413 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2414 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2415 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2416 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2417 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2418 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2419 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2420 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2421 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2422 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2423 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2424 2425 END_FIXUP 2426}; 2427 2428static int mmc_blk_probe(struct mmc_card *card) 2429{ 2430 struct mmc_blk_data *md, *part_md; 2431 char cap_str[10]; 2432 2433 /* 2434 * Check that the card supports the command class(es) we need. 2435 */ 2436 if (!(card->csd.cmdclass & CCC_BLOCK_READ)) 2437 return -ENODEV; 2438 2439 mmc_fixup_device(card, blk_fixups); 2440 2441 md = mmc_blk_alloc(card); 2442 if (IS_ERR(md)) 2443 return PTR_ERR(md); 2444 2445 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, 2446 cap_str, sizeof(cap_str)); 2447 pr_info("%s: %s %s %s %s\n", 2448 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 2449 cap_str, md->read_only ? "(ro)" : ""); 2450 2451 if (mmc_blk_alloc_parts(card, md)) 2452 goto out; 2453 2454 mmc_set_drvdata(card, md); 2455 2456 if (mmc_add_disk(md)) 2457 goto out; 2458 2459 list_for_each_entry(part_md, &md->part, part) { 2460 if (mmc_add_disk(part_md)) 2461 goto out; 2462 } 2463 2464 pm_runtime_set_autosuspend_delay(&card->dev, 3000); 2465 pm_runtime_use_autosuspend(&card->dev); 2466 2467 /* 2468 * Don't enable runtime PM for SD-combo cards here. Leave that 2469 * decision to be taken during the SDIO init sequence instead. 2470 */ 2471 if (card->type != MMC_TYPE_SD_COMBO) { 2472 pm_runtime_set_active(&card->dev); 2473 pm_runtime_enable(&card->dev); 2474 } 2475 2476 return 0; 2477 2478 out: 2479 mmc_blk_remove_parts(card, md); 2480 mmc_blk_remove_req(md); 2481 return 0; 2482} 2483 2484static void mmc_blk_remove(struct mmc_card *card) 2485{ 2486 struct mmc_blk_data *md = mmc_get_drvdata(card); 2487 2488 mmc_blk_remove_parts(card, md); 2489 pm_runtime_get_sync(&card->dev); 2490 mmc_claim_host(card->host); 2491 mmc_blk_part_switch(card, md); 2492 mmc_release_host(card->host); 2493 if (card->type != MMC_TYPE_SD_COMBO) 2494 pm_runtime_disable(&card->dev); 2495 pm_runtime_put_noidle(&card->dev); 2496 mmc_blk_remove_req(md); 2497 mmc_set_drvdata(card, NULL); 2498} 2499 2500static int _mmc_blk_suspend(struct mmc_card *card) 2501{ 2502 struct mmc_blk_data *part_md; 2503 struct mmc_blk_data *md = mmc_get_drvdata(card); 2504 2505 if (md) { 2506 mmc_queue_suspend(&md->queue); 2507 list_for_each_entry(part_md, &md->part, part) { 2508 mmc_queue_suspend(&part_md->queue); 2509 } 2510 } 2511 return 0; 2512} 2513 2514static void mmc_blk_shutdown(struct mmc_card *card) 2515{ 2516 _mmc_blk_suspend(card); 2517} 2518 2519#ifdef CONFIG_PM 2520static int mmc_blk_suspend(struct mmc_card *card) 2521{ 2522 return _mmc_blk_suspend(card); 2523} 2524 2525static int mmc_blk_resume(struct mmc_card *card) 2526{ 2527 struct mmc_blk_data *part_md; 2528 struct mmc_blk_data *md = mmc_get_drvdata(card); 2529 2530 if (md) { 2531 /* 2532 * Resume involves the card going into idle state, 2533 * so current partition is always the main one. 2534 */ 2535 md->part_curr = md->part_type; 2536 mmc_queue_resume(&md->queue); 2537 list_for_each_entry(part_md, &md->part, part) { 2538 mmc_queue_resume(&part_md->queue); 2539 } 2540 } 2541 return 0; 2542} 2543#else 2544#define mmc_blk_suspend NULL 2545#define mmc_blk_resume NULL 2546#endif 2547 2548static struct mmc_driver mmc_driver = { 2549 .drv = { 2550 .name = "mmcblk", 2551 }, 2552 .probe = mmc_blk_probe, 2553 .remove = mmc_blk_remove, 2554 .suspend = mmc_blk_suspend, 2555 .resume = mmc_blk_resume, 2556 .shutdown = mmc_blk_shutdown, 2557}; 2558 2559static int __init mmc_blk_init(void) 2560{ 2561 int res; 2562 2563 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) 2564 pr_info("mmcblk: using %d minors per device\n", perdev_minors); 2565 2566 max_devices = 256 / perdev_minors; 2567 2568 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 2569 if (res) 2570 goto out; 2571 2572 res = mmc_register_driver(&mmc_driver); 2573 if (res) 2574 goto out2; 2575 2576 return 0; 2577 out2: 2578 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 2579 out: 2580 return res; 2581} 2582 2583static void __exit mmc_blk_exit(void) 2584{ 2585 mmc_unregister_driver(&mmc_driver); 2586 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 2587} 2588 2589module_init(mmc_blk_init); 2590module_exit(mmc_blk_exit); 2591 2592MODULE_LICENSE("GPL"); 2593MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); 2594 2595