1/*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2008 Pierre Ossman
6 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author:  Andrew Christian
18 *          28 May 2002
19 */
20#include <linux/moduleparam.h>
21#include <linux/module.h>
22#include <linux/init.h>
23
24#include <linux/kernel.h>
25#include <linux/fs.h>
26#include <linux/slab.h>
27#include <linux/errno.h>
28#include <linux/hdreg.h>
29#include <linux/kdev_t.h>
30#include <linux/blkdev.h>
31#include <linux/mutex.h>
32#include <linux/scatterlist.h>
33#include <linux/string_helpers.h>
34#include <linux/delay.h>
35#include <linux/capability.h>
36#include <linux/compat.h>
37
38#include <linux/mmc/ioctl.h>
39#include <linux/mmc/card.h>
40#include <linux/mmc/host.h>
41#include <linux/mmc/mmc.h>
42#include <linux/mmc/sd.h>
43
44#include <asm/system.h>
45#include <asm/uaccess.h>
46
47#include "queue.h"
48
49MODULE_ALIAS("mmc:block");
50#ifdef MODULE_PARAM_PREFIX
51#undef MODULE_PARAM_PREFIX
52#endif
53#define MODULE_PARAM_PREFIX "mmcblk."
54
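/*
 * Vendor-specific EXT_CSD byte used by iNAND (SanDisk) eMMC devices: cards
 * carrying the MMC_QUIRK_INAND_CMD38 quirk expect the type of the following
 * erase (CMD38) to be announced through this register first.
 */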
55#define INAND_CMD38_ARG_EXT_CSD  113
56#define INAND_CMD38_ARG_ERASE    0x00
57#define INAND_CMD38_ARG_TRIM     0x01
58#define INAND_CMD38_ARG_SECERASE 0x80
59#define INAND_CMD38_ARG_SECTRIM1 0x81
60#define INAND_CMD38_ARG_SECTRIM2 0x88
61
62static DEFINE_MUTEX(block_mutex);
63
64/*
65 * The defaults come from config options but can be overridden by module
66 * or bootarg options.
67 */
68static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
69
70/*
71 * We've only got one major, so number of mmcblk devices is
72 * limited to 256 / number of minors per device.
73 */
74static int max_devices;
75
76/* 256 minors, so at most 256 separate devices */
77static DECLARE_BITMAP(dev_use, 256);
78static DECLARE_BITMAP(name_use, 256);
79
80/*
81 * There is one mmc_blk_data per slot.
82 */
83struct mmc_blk_data {
84	spinlock_t	lock;
85	struct gendisk	*disk;
86	struct mmc_queue queue;
87	struct list_head part;
88
89	unsigned int	flags;
90#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
91#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
92
93	unsigned int	usage;
94	unsigned int	read_only;
95	unsigned int	part_type;
96	unsigned int	name_idx;
97	unsigned int	reset_done;
98#define MMC_BLK_READ		BIT(0)
99#define MMC_BLK_WRITE		BIT(1)
100#define MMC_BLK_DISCARD		BIT(2)
101#define MMC_BLK_SECDISCARD	BIT(3)
102
103	/*
104	 * Only set in the main mmc_blk_data associated
105	 * with the mmc_card via mmc_set_drvdata; it keeps
106	 * track of the currently selected device partition.
107	 */
108	unsigned int	part_curr;
109	struct device_attribute force_ro;
110	struct device_attribute power_ro_lock;
111	int	area_type;
112};
113
114static DEFINE_MUTEX(open_lock);
115
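/*
 * Per-request outcome codes: mmc_blk_err_check() distils command, data and
 * card status errors into one of these, and mmc_blk_issue_rw_rq() uses them
 * to decide whether to complete, retry, fall back to single-block transfers
 * or abort the request.
 */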
116enum mmc_blk_status {
117	MMC_BLK_SUCCESS = 0,
118	MMC_BLK_PARTIAL,
119	MMC_BLK_CMD_ERR,
120	MMC_BLK_RETRY,
121	MMC_BLK_ABORT,
122	MMC_BLK_DATA_ERR,
123	MMC_BLK_ECC_ERR,
124	MMC_BLK_NOMEDIUM,
125};
126
127module_param(perdev_minors, int, 0444);
128MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
129
130static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
131{
132	struct mmc_blk_data *md;
133
134	mutex_lock(&open_lock);
135	md = disk->private_data;
136	if (md && md->usage == 0)
137		md = NULL;
138	if (md)
139		md->usage++;
140	mutex_unlock(&open_lock);
141
142	return md;
143}
144
145static inline int mmc_get_devidx(struct gendisk *disk)
146{
147	int devidx = disk->first_minor / perdev_minors;
148	return devidx;
149}
150
151static void mmc_blk_put(struct mmc_blk_data *md)
152{
153	mutex_lock(&open_lock);
154	md->usage--;
155	if (md->usage == 0) {
156		int devidx = mmc_get_devidx(md->disk);
157		blk_cleanup_queue(md->queue.queue);
158
159		__clear_bit(devidx, dev_use);
160
161		put_disk(md->disk);
162		kfree(md);
163	}
164	mutex_unlock(&open_lock);
165}
166
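/*
 * "ro_lock_until_next_power_on" sysfs attribute for boot partitions:
 * reading reports 0 (unlocked), 1 (write-protected until next power on) or
 * 2 (permanently write-protected); writing 1 sets the power-on write
 * protection via the EXT_CSD BOOT_WP register.
 */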
167static ssize_t power_ro_lock_show(struct device *dev,
168		struct device_attribute *attr, char *buf)
169{
170	int ret;
171	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
172	struct mmc_card *card = md->queue.card;
173	int locked = 0;
174
175	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
176		locked = 2;
177	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
178		locked = 1;
179
180	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
181
182	return ret;
183}
184
185static ssize_t power_ro_lock_store(struct device *dev,
186		struct device_attribute *attr, const char *buf, size_t count)
187{
188	int ret;
189	struct mmc_blk_data *md, *part_md;
190	struct mmc_card *card;
191	unsigned long set;
192
193	if (kstrtoul(buf, 0, &set))
194		return -EINVAL;
195
196	if (set != 1)
197		return count;
198
199	md = mmc_blk_get(dev_to_disk(dev));
200	card = md->queue.card;
201
202	mmc_claim_host(card->host);
203
204	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
205				card->ext_csd.boot_ro_lock |
206				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
207				card->ext_csd.part_time);
208	if (ret)
209		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
210	else
211		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
212
213	mmc_release_host(card->host);
214
215	if (!ret) {
216		pr_info("%s: Locking boot partition ro until next power on\n",
217			md->disk->disk_name);
218		set_disk_ro(md->disk, 1);
219
220		list_for_each_entry(part_md, &md->part, part)
221			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
222				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
223				set_disk_ro(part_md->disk, 1);
224			}
225	}
226
227	mmc_blk_put(md);
228	return count;
229}
230
231static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
232			     char *buf)
233{
234	int ret;
235	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
236
237	ret = snprintf(buf, PAGE_SIZE, "%d",
238		       get_disk_ro(dev_to_disk(dev)) ^
239		       md->read_only);
240	mmc_blk_put(md);
241	return ret;
242}
243
244static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
245			      const char *buf, size_t count)
246{
247	int ret;
248	char *end;
249	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
250	unsigned long set = simple_strtoul(buf, &end, 0);
251	if (end == buf) {
252		ret = -EINVAL;
253		goto out;
254	}
255
256	set_disk_ro(dev_to_disk(dev), set || md->read_only);
257	ret = count;
258out:
259	mmc_blk_put(md);
260	return ret;
261}
262
263static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
264{
265	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
266	int ret = -ENXIO;
267
268	mutex_lock(&block_mutex);
269	if (md) {
270		if (md->usage == 2)
271			check_disk_change(bdev);
272		ret = 0;
273
274		if ((mode & FMODE_WRITE) && md->read_only) {
275			mmc_blk_put(md);
276			ret = -EROFS;
277		}
278	}
279	mutex_unlock(&block_mutex);
280
281	return ret;
282}
283
284static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
285{
286	struct mmc_blk_data *md = disk->private_data;
287
288	mutex_lock(&block_mutex);
289	mmc_blk_put(md);
290	mutex_unlock(&block_mutex);
291	return 0;
292}
293
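/*
 * MMC/SD cards have no real CHS geometry; fabricate a 4 head, 16 sector
 * layout so that legacy partitioning tools asking for a geometry get
 * something consistent with the card's capacity.
 */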
294static int
295mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
296{
297	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
298	geo->heads = 4;
299	geo->sectors = 16;
300	return 0;
301}
302
303struct mmc_blk_ioc_data {
304	struct mmc_ioc_cmd ic;
305	unsigned char *buf;
306	u64 buf_bytes;
307};
308
309static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
310	struct mmc_ioc_cmd __user *user)
311{
312	struct mmc_blk_ioc_data *idata;
313	int err;
314
315	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
316	if (!idata) {
317		err = -ENOMEM;
318		goto out;
319	}
320
321	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
322		err = -EFAULT;
323		goto idata_err;
324	}
325
326	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
327	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
328		err = -EOVERFLOW;
329		goto idata_err;
330	}
331
332	if (!idata->buf_bytes)
333		return idata;
334
335	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
336	if (!idata->buf) {
337		err = -ENOMEM;
338		goto idata_err;
339	}
340
341	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
342					idata->ic.data_ptr, idata->buf_bytes)) {
343		err = -EFAULT;
344		goto copy_err;
345	}
346
347	return idata;
348
349copy_err:
350	kfree(idata->buf);
351idata_err:
352	kfree(idata);
353out:
354	return ERR_PTR(err);
355}
356
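/*
 * Userspace reaches this path via the MMC_IOC_CMD ioctl on the whole block
 * device.  As a rough (hypothetical) sketch, issuing CMD13 (SEND_STATUS)
 * from a privileged process could look like:
 *
 *	struct mmc_ioc_cmd ic = {0};
 *
 *	ic.opcode = MMC_SEND_STATUS;
 *	ic.arg = rca << 16;
 *	ic.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	ioctl(fd, MMC_IOC_CMD, &ic);
 *
 * where fd is an open descriptor on e.g. /dev/mmcblk0 and rca is the card's
 * relative address; flags and arguments vary per command, so this is only
 * an illustration of the interface.
 */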
357static int mmc_blk_ioctl_cmd(struct block_device *bdev,
358	struct mmc_ioc_cmd __user *ic_ptr)
359{
360	struct mmc_blk_ioc_data *idata;
361	struct mmc_blk_data *md;
362	struct mmc_card *card;
363	struct mmc_command cmd = {0};
364	struct mmc_data data = {0};
365	struct mmc_request mrq = {NULL};
366	struct scatterlist sg;
367	int err;
368
369	/*
370	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
371	 * whole block device, not on a partition.  This prevents overspray
372	 * between sibling partitions.
373	 */
374	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
375		return -EPERM;
376
377	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
378	if (IS_ERR(idata))
379		return PTR_ERR(idata);
380
381	md = mmc_blk_get(bdev->bd_disk);
382	if (!md) {
383		err = -EINVAL;
384		goto cmd_done;
385	}
386
387	card = md->queue.card;
388	if (IS_ERR(card)) {
389		err = PTR_ERR(card);
390		goto cmd_done;
391	}
392
393	cmd.opcode = idata->ic.opcode;
394	cmd.arg = idata->ic.arg;
395	cmd.flags = idata->ic.flags;
396
397	if (idata->buf_bytes) {
398		data.sg = &sg;
399		data.sg_len = 1;
400		data.blksz = idata->ic.blksz;
401		data.blocks = idata->ic.blocks;
402
403		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
404
405		if (idata->ic.write_flag)
406			data.flags = MMC_DATA_WRITE;
407		else
408			data.flags = MMC_DATA_READ;
409
410		/* data.flags must already be set before doing this. */
411		mmc_set_data_timeout(&data, card);
412
413		/* Allow overriding the timeout_ns for empirical tuning. */
414		if (idata->ic.data_timeout_ns)
415			data.timeout_ns = idata->ic.data_timeout_ns;
416
417		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
418			/*
419			 * Pretend this is a data transfer and rely on the
420			 * host driver to compute timeout.  When all host
421			 * drivers support cmd.cmd_timeout for R1B, this
422			 * can be changed to:
423			 *
424			 *     mrq.data = NULL;
425			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
426			 */
427			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
428		}
429
430		mrq.data = &data;
431	}
432
433	mrq.cmd = &cmd;
434
435	mmc_claim_host(card->host);
436
437	if (idata->ic.is_acmd) {
438		err = mmc_app_cmd(card->host, card);
439		if (err)
440			goto cmd_rel_host;
441	}
442
443	mmc_wait_for_req(card->host, &mrq);
444
445	if (cmd.error) {
446		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
447						__func__, cmd.error);
448		err = cmd.error;
449		goto cmd_rel_host;
450	}
451	if (data.error) {
452		dev_err(mmc_dev(card->host), "%s: data error %d\n",
453						__func__, data.error);
454		err = data.error;
455		goto cmd_rel_host;
456	}
457
458	/*
459	 * According to the SD specs, some commands require a delay after
460	 * issuing the command.
461	 */
462	if (idata->ic.postsleep_min_us)
463		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
464
465	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
466		err = -EFAULT;
467		goto cmd_rel_host;
468	}
469
470	if (!idata->ic.write_flag) {
471		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
472						idata->buf, idata->buf_bytes)) {
473			err = -EFAULT;
474			goto cmd_rel_host;
475		}
476	}
477
478cmd_rel_host:
479	mmc_release_host(card->host);
480
481cmd_done:
482	mmc_blk_put(md);
483	kfree(idata->buf);
484	kfree(idata);
485	return err;
486}
487
488static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
489	unsigned int cmd, unsigned long arg)
490{
491	int ret = -EINVAL;
492	if (cmd == MMC_IOC_CMD)
493		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
494	return ret;
495}
496
497#ifdef CONFIG_COMPAT
498static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
499	unsigned int cmd, unsigned long arg)
500{
501	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
502}
503#endif
504
505static const struct block_device_operations mmc_bdops = {
506	.open			= mmc_blk_open,
507	.release		= mmc_blk_release,
508	.getgeo			= mmc_blk_getgeo,
509	.owner			= THIS_MODULE,
510	.ioctl			= mmc_blk_ioctl,
511#ifdef CONFIG_COMPAT
512	.compat_ioctl		= mmc_blk_compat_ioctl,
513#endif
514};
515
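/*
 * eMMC physical partitions (user area, boot and general purpose areas) are
 * selected through the access bits of the EXT_CSD PARTITION_CONFIG byte.
 * main_md->part_curr caches the currently selected partition so redundant
 * switch commands are avoided.
 */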
516static inline int mmc_blk_part_switch(struct mmc_card *card,
517				      struct mmc_blk_data *md)
518{
519	int ret;
520	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
521
522	if (main_md->part_curr == md->part_type)
523		return 0;
524
525	if (mmc_card_mmc(card)) {
526		u8 part_config = card->ext_csd.part_config;
527
528		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
529		part_config |= md->part_type;
530
531		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
532				 EXT_CSD_PART_CONFIG, part_config,
533				 card->ext_csd.part_time);
534		if (ret)
535			return ret;
536
537		card->ext_csd.part_config = part_config;
538	}
539
540	main_md->part_curr = md->part_type;
541	return 0;
542}
543
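/*
 * For SD cards, ACMD22 (SEND_NUM_WR_BLKS) reports how many blocks were
 * written without error.  It is used after a failed write to complete the
 * known-good leading part of the request.
 */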
544static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
545{
546	int err;
547	u32 result;
548	__be32 *blocks;
549
550	struct mmc_request mrq = {NULL};
551	struct mmc_command cmd = {0};
552	struct mmc_data data = {0};
553	unsigned int timeout_us;
554
555	struct scatterlist sg;
556
557	cmd.opcode = MMC_APP_CMD;
558	cmd.arg = card->rca << 16;
559	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
560
561	err = mmc_wait_for_cmd(card->host, &cmd, 0);
562	if (err)
563		return (u32)-1;
564	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
565		return (u32)-1;
566
567	memset(&cmd, 0, sizeof(struct mmc_command));
568
569	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
570	cmd.arg = 0;
571	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
572
573	data.timeout_ns = card->csd.tacc_ns * 100;
574	data.timeout_clks = card->csd.tacc_clks * 100;
575
576	timeout_us = data.timeout_ns / 1000;
577	timeout_us += data.timeout_clks * 1000 /
578		(card->host->ios.clock / 1000);
579
580	if (timeout_us > 100000) {
581		data.timeout_ns = 100000000;
582		data.timeout_clks = 0;
583	}
584
585	data.blksz = 4;
586	data.blocks = 1;
587	data.flags = MMC_DATA_READ;
588	data.sg = &sg;
589	data.sg_len = 1;
590
591	mrq.cmd = &cmd;
592	mrq.data = &data;
593
594	blocks = kmalloc(4, GFP_KERNEL);
595	if (!blocks)
596		return (u32)-1;
597
598	sg_init_one(&sg, blocks, 4);
599
600	mmc_wait_for_req(card->host, &mrq);
601
602	result = ntohl(*blocks);
603	kfree(blocks);
604
605	if (cmd.error || data.error)
606		result = (u32)-1;
607
608	return result;
609}
610
611static int send_stop(struct mmc_card *card, u32 *status)
612{
613	struct mmc_command cmd = {0};
614	int err;
615
616	cmd.opcode = MMC_STOP_TRANSMISSION;
617	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
618	err = mmc_wait_for_cmd(card->host, &cmd, 5);
619	if (err == 0)
620		*status = cmd.resp[0];
621	return err;
622}
623
624static int get_card_status(struct mmc_card *card, u32 *status, int retries)
625{
626	struct mmc_command cmd = {0};
627	int err;
628
629	cmd.opcode = MMC_SEND_STATUS;
630	if (!mmc_host_is_spi(card->host))
631		cmd.arg = card->rca << 16;
632	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
633	err = mmc_wait_for_cmd(card->host, &cmd, retries);
634	if (err == 0)
635		*status = cmd.resp[0];
636	return err;
637}
638
639#define ERR_NOMEDIUM	3
640#define ERR_RETRY	2
641#define ERR_ABORT	1
642#define ERR_CONTINUE	0
643
644static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
645	bool status_valid, u32 status)
646{
647	switch (error) {
648	case -EILSEQ:
649		/* response crc error, retry the r/w cmd */
650		pr_err("%s: %s sending %s command, card status %#x\n",
651			req->rq_disk->disk_name, "response CRC error",
652			name, status);
653		return ERR_RETRY;
654
655	case -ETIMEDOUT:
656		pr_err("%s: %s sending %s command, card status %#x\n",
657			req->rq_disk->disk_name, "timed out", name, status);
658
659		/* If the status cmd initially failed, retry the r/w cmd */
660		if (!status_valid) {
661			pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
662			return ERR_RETRY;
663		}
664		/*
665		 * If it was an r/w cmd CRC error, or an illegal command
666		 * (e.g., issued in the wrong state), then retry - we should
667		 * have corrected the state problem above.
668		 */
669		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
670			pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
671			return ERR_RETRY;
672		}
673
674		/* Otherwise abort the command */
675		pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
676		return ERR_ABORT;
677
678	default:
679		/* We don't understand the error code the driver gave us */
680		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
681		       req->rq_disk->disk_name, error, status);
682		return ERR_ABORT;
683	}
684}
685
686/*
687 * Initial r/w and stop cmd error recovery.
688 * We don't know whether the card received the r/w cmd or not, so try to
689 * restore things back to a sane state.  Essentially, we do this as follows:
690 * - Obtain card status.  If the first attempt to obtain card status fails,
691 *   the status word will reflect the failed status cmd, not the failed
692 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
693 *   longer communicate with the card.
694 * - Check the card state.  If the card received the cmd but there was a
695 *   transient problem with the response, it might still be in a data transfer
696 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
697 * - If the r/w cmd failed due to a response CRC error, it was probably
698 *   transient, so retry the cmd.
699 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
700 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
701 *   illegal cmd, retry.
702 * Otherwise we don't understand what happened, so abort.
703 */
704static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
705	struct mmc_blk_request *brq, int *ecc_err)
706{
707	bool prev_cmd_status_valid = true;
708	u32 status, stop_status = 0;
709	int err, retry;
710
711	if (mmc_card_removed(card))
712		return ERR_NOMEDIUM;
713
714	/*
715	 * Try to get card status which indicates both the card state
716	 * and why there was no response.  If the first attempt fails,
717	 * we can't be sure the returned status is for the r/w command.
718	 */
719	for (retry = 2; retry >= 0; retry--) {
720		err = get_card_status(card, &status, 0);
721		if (!err)
722			break;
723
724		prev_cmd_status_valid = false;
725		pr_err("%s: error %d sending status command, %sing\n",
726		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
727	}
728
729	/* We couldn't get a response from the card.  Give up. */
730	if (err) {
731		/* Check if the card is removed */
732		if (mmc_detect_card_removed(card->host))
733			return ERR_NOMEDIUM;
734		return ERR_ABORT;
735	}
736
737	/* Flag ECC errors */
738	if ((status & R1_CARD_ECC_FAILED) ||
739	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
740	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
741		*ecc_err = 1;
742
743	/*
744	 * Check the current card state.  If it is in some data transfer
745	 * mode, tell it to stop (and hopefully transition back to TRAN.)
746	 */
747	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
748	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
749		err = send_stop(card, &stop_status);
750		if (err)
751			pr_err("%s: error %d sending stop command\n",
752			       req->rq_disk->disk_name, err);
753
754		/*
755		 * If the stop cmd also timed out, the card is probably
756		 * not present, so abort.  Other errors are bad news too.
757		 */
758		if (err)
759			return ERR_ABORT;
760		if (stop_status & R1_CARD_ECC_FAILED)
761			*ecc_err = 1;
762	}
763
764	/* Check for set block count errors */
765	if (brq->sbc.error)
766		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
767				prev_cmd_status_valid, status);
768
769	/* Check for r/w command errors */
770	if (brq->cmd.error)
771		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
772				prev_cmd_status_valid, status);
773
774	/* Data errors */
775	if (!brq->stop.error)
776		return ERR_CONTINUE;
777
778	/* Now for stop errors.  These aren't fatal to the transfer. */
779	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
780	       req->rq_disk->disk_name, brq->stop.error,
781	       brq->cmd.resp[0], status);
782
783	/*
784	 * Substitute in our own stop status as this will give the error
785	 * state which happened during the execution of the r/w command.
786	 */
787	if (stop_status) {
788		brq->stop.resp[0] = stop_status;
789		brq->stop.error = 0;
790	}
791	return ERR_CONTINUE;
792}
793
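/*
 * reset_done is a bitmask of request types (read/write/discard/secdiscard)
 * for which a host reset has already been attempted; another reset is only
 * allowed once a request of that type completes successfully again (see
 * mmc_blk_reset_success()), which keeps us from looping on resets.
 */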
794static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
795			 int type)
796{
797	int err;
798
799	if (md->reset_done & type)
800		return -EEXIST;
801
802	md->reset_done |= type;
803	err = mmc_hw_reset(host);
804	/* Ensure we switch back to the correct partition */
805	if (err != -EOPNOTSUPP) {
806		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
807		int part_err;
808
809		main_md->part_curr = main_md->part_type;
810		part_err = mmc_blk_part_switch(host->card, md);
811		if (part_err) {
812			/*
813			 * We have failed to get back into the correct
814			 * partition, so we need to abort the whole request.
815			 */
816			return -ENODEV;
817		}
818	}
819	return err;
820}
821
822static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
823{
824	md->reset_done &= ~type;
825}
826
827static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
828{
829	struct mmc_blk_data *md = mq->data;
830	struct mmc_card *card = md->queue.card;
831	unsigned int from, nr, arg;
832	int err = 0, type = MMC_BLK_DISCARD;
833
834	if (!mmc_can_erase(card)) {
835		err = -EOPNOTSUPP;
836		goto out;
837	}
838
839	from = blk_rq_pos(req);
840	nr = blk_rq_sectors(req);
841
842	if (mmc_can_discard(card))
843		arg = MMC_DISCARD_ARG;
844	else if (mmc_can_trim(card))
845		arg = MMC_TRIM_ARG;
846	else
847		arg = MMC_ERASE_ARG;
848retry:
849	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
850		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
851				 INAND_CMD38_ARG_EXT_CSD,
852				 arg == MMC_TRIM_ARG ?
853				 INAND_CMD38_ARG_TRIM :
854				 INAND_CMD38_ARG_ERASE,
855				 0);
856		if (err)
857			goto out;
858	}
859	err = mmc_erase(card, from, nr, arg);
860out:
861	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
862		goto retry;
863	if (!err)
864		mmc_blk_reset_success(md, type);
865	spin_lock_irq(&md->lock);
866	__blk_end_request(req, err, blk_rq_bytes(req));
867	spin_unlock_irq(&md->lock);
868
869	return err ? 0 : 1;
870}
871
872static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
873				       struct request *req)
874{
875	struct mmc_blk_data *md = mq->data;
876	struct mmc_card *card = md->queue.card;
877	unsigned int from, nr, arg;
878	int err = 0, type = MMC_BLK_SECDISCARD;
879
880	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
881		err = -EOPNOTSUPP;
882		goto out;
883	}
884
885	/* The sanitize operation is only supported from eMMC v4.5 onwards */
886	if (mmc_can_sanitize(card)) {
887		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
888				EXT_CSD_SANITIZE_START, 1, 0);
889		goto out;
890	}
891
892	from = blk_rq_pos(req);
893	nr = blk_rq_sectors(req);
894
895	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
896		arg = MMC_SECURE_TRIM1_ARG;
897	else
898		arg = MMC_SECURE_ERASE_ARG;
899retry:
900	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
901		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
902				 INAND_CMD38_ARG_EXT_CSD,
903				 arg == MMC_SECURE_TRIM1_ARG ?
904				 INAND_CMD38_ARG_SECTRIM1 :
905				 INAND_CMD38_ARG_SECERASE,
906				 0);
907		if (err)
908			goto out;
909	}
910	err = mmc_erase(card, from, nr, arg);
911	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
912		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
913			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
914					 INAND_CMD38_ARG_EXT_CSD,
915					 INAND_CMD38_ARG_SECTRIM2,
916					 0);
917			if (err)
918				goto out;
919		}
920		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
921	}
922out:
923	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
924		goto retry;
925	if (!err)
926		mmc_blk_reset_success(md, type);
927	spin_lock_irq(&md->lock);
928	__blk_end_request(req, err, blk_rq_bytes(req));
929	spin_unlock_irq(&md->lock);
930
931	return err ? 0 : 1;
932}
933
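/*
 * REQ_FLUSH requests are serviced by flushing the card's volatile write
 * cache; any failure is reported back to the block layer as -EIO.
 */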
934static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
935{
936	struct mmc_blk_data *md = mq->data;
937	struct mmc_card *card = md->queue.card;
938	int ret = 0;
939
940	ret = mmc_flush_cache(card);
941	if (ret)
942		ret = -EIO;
943
944	spin_lock_irq(&md->lock);
945	__blk_end_request_all(req, ret);
946	spin_unlock_irq(&md->lock);
947
948	return ret ? 0 : 1;
949}
950
951/*
952 * Reformat current write as a reliable write, supporting
953 * both legacy and the enhanced reliable write MMC cards.
954 * In each transfer we'll handle only as much as a single
955 * reliable write can handle, thus finish the request in
956 * partial completions.
957 */
958static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
959				    struct mmc_card *card,
960				    struct request *req)
961{
962	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
963		/* Legacy mode imposes restrictions on transfers. */
964		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
965			brq->data.blocks = 1;
966
967		if (brq->data.blocks > card->ext_csd.rel_sectors)
968			brq->data.blocks = card->ext_csd.rel_sectors;
969		else if (brq->data.blocks < card->ext_csd.rel_sectors)
970			brq->data.blocks = 1;
971	}
972}
973
974#define CMD_ERRORS							\
975	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
976	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
977	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
978	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
979	 R1_CC_ERROR |		/* Card controller error */		\
980	 R1_ERROR)		/* General/unknown error */
981
982static int mmc_blk_err_check(struct mmc_card *card,
983			     struct mmc_async_req *areq)
984{
985	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
986						    mmc_active);
987	struct mmc_blk_request *brq = &mq_mrq->brq;
988	struct request *req = mq_mrq->req;
989	int ecc_err = 0;
990
991	/*
992	 * sbc.error indicates a problem with the set block count
993	 * command.  No data will have been transferred.
994	 *
995	 * cmd.error indicates a problem with the r/w command.  No
996	 * data will have been transferred.
997	 *
998	 * stop.error indicates a problem with the stop command.  Data
999	 * may have been transferred, or may still be transferring.
1000	 */
1001	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1002	    brq->data.error) {
1003		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
1004		case ERR_RETRY:
1005			return MMC_BLK_RETRY;
1006		case ERR_ABORT:
1007			return MMC_BLK_ABORT;
1008		case ERR_NOMEDIUM:
1009			return MMC_BLK_NOMEDIUM;
1010		case ERR_CONTINUE:
1011			break;
1012		}
1013	}
1014
1015	/*
1016	 * Check for errors relating to the execution of the
1017	 * initial command - such as address errors.  No data
1018	 * has been transferred.
1019	 */
1020	if (brq->cmd.resp[0] & CMD_ERRORS) {
1021		pr_err("%s: r/w command failed, status = %#x\n",
1022		       req->rq_disk->disk_name, brq->cmd.resp[0]);
1023		return MMC_BLK_ABORT;
1024	}
1025
1026	/*
1027	 * Everything else is either success, or a data error of some
1028	 * kind.  If it was a write, we may have transitioned to
1029	 * program mode, which we have to wait to complete.
1030	 */
1031	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1032		u32 status;
1033		do {
1034			int err = get_card_status(card, &status, 5);
1035			if (err) {
1036				pr_err("%s: error %d requesting status\n",
1037				       req->rq_disk->disk_name, err);
1038				return MMC_BLK_CMD_ERR;
1039			}
1040			/*
1041			 * Some cards mishandle the status bits,
1042			 * so make sure to check both the busy
1043			 * indication and the card state.
1044			 */
1045		} while (!(status & R1_READY_FOR_DATA) ||
1046			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1047	}
1048
1049	if (brq->data.error) {
1050		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1051		       req->rq_disk->disk_name, brq->data.error,
1052		       (unsigned)blk_rq_pos(req),
1053		       (unsigned)blk_rq_sectors(req),
1054		       brq->cmd.resp[0], brq->stop.resp[0]);
1055
1056		if (rq_data_dir(req) == READ) {
1057			if (ecc_err)
1058				return MMC_BLK_ECC_ERR;
1059			return MMC_BLK_DATA_ERR;
1060		} else {
1061			return MMC_BLK_CMD_ERR;
1062		}
1063	}
1064
1065	if (!brq->data.bytes_xfered)
1066		return MMC_BLK_RETRY;
1067
1068	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1069		return MMC_BLK_PARTIAL;
1070
1071	return MMC_BLK_SUCCESS;
1072}
1073
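/*
 * Translate a block layer read/write request into an mmc_blk_request:
 * pick single vs. multiple block commands, optionally set up CMD23
 * (SET_BLOCK_COUNT) and a stop command, mark reliable writes, and map the
 * request's pages into the scatterlist used for the data transfer.
 */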
1074static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1075			       struct mmc_card *card,
1076			       int disable_multi,
1077			       struct mmc_queue *mq)
1078{
1079	u32 readcmd, writecmd;
1080	struct mmc_blk_request *brq = &mqrq->brq;
1081	struct request *req = mqrq->req;
1082	struct mmc_blk_data *md = mq->data;
1083
1084	/*
1085	 * Reliable writes are used to implement Forced Unit Access and
1086	 * REQ_META accesses, and are supported only on MMCs.
1087	 *
1088	 * XXX: this really needs a good explanation of why REQ_META
1089	 * is treated special.
1090	 */
1091	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1092			  (req->cmd_flags & REQ_META)) &&
1093		(rq_data_dir(req) == WRITE) &&
1094		(md->flags & MMC_BLK_REL_WR);
1095
1096	memset(brq, 0, sizeof(struct mmc_blk_request));
1097	brq->mrq.cmd = &brq->cmd;
1098	brq->mrq.data = &brq->data;
1099
1100	brq->cmd.arg = blk_rq_pos(req);
1101	if (!mmc_card_blockaddr(card))
1102		brq->cmd.arg <<= 9;
1103	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1104	brq->data.blksz = 512;
1105	brq->stop.opcode = MMC_STOP_TRANSMISSION;
1106	brq->stop.arg = 0;
1107	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1108	brq->data.blocks = blk_rq_sectors(req);
1109
1110	/*
1111	 * The block layer doesn't support all sector count
1112	 * restrictions, so we need to be prepared for too big
1113	 * requests.
1114	 */
1115	if (brq->data.blocks > card->host->max_blk_count)
1116		brq->data.blocks = card->host->max_blk_count;
1117
1118	if (brq->data.blocks > 1) {
1119		/*
1120		 * After a read error, we redo the request one sector
1121		 * at a time in order to accurately determine which
1122		 * sectors can be read successfully.
1123		 */
1124		if (disable_multi)
1125			brq->data.blocks = 1;
1126
1127		/* Some controllers can't do multiblock reads due to hw bugs */
1128		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
1129		    rq_data_dir(req) == READ)
1130			brq->data.blocks = 1;
1131	}
1132
1133	if (brq->data.blocks > 1 || do_rel_wr) {
1134		/* SPI multiblock writes terminate using a special
1135		 * token, not a STOP_TRANSMISSION request.
1136		 */
1137		if (!mmc_host_is_spi(card->host) ||
1138		    rq_data_dir(req) == READ)
1139			brq->mrq.stop = &brq->stop;
1140		readcmd = MMC_READ_MULTIPLE_BLOCK;
1141		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1142	} else {
1143		brq->mrq.stop = NULL;
1144		readcmd = MMC_READ_SINGLE_BLOCK;
1145		writecmd = MMC_WRITE_BLOCK;
1146	}
1147	if (rq_data_dir(req) == READ) {
1148		brq->cmd.opcode = readcmd;
1149		brq->data.flags |= MMC_DATA_READ;
1150	} else {
1151		brq->cmd.opcode = writecmd;
1152		brq->data.flags |= MMC_DATA_WRITE;
1153	}
1154
1155	if (do_rel_wr)
1156		mmc_apply_rel_rw(brq, card, req);
1157
1158	/*
1159	 * Pre-defined multi-block transfers are preferable to
1160	 * open-ended ones (and necessary for reliable writes).
1161	 * However, it is not sufficient to just send CMD23,
1162	 * and avoid the final CMD12, as on an error condition
1163	 * CMD12 (stop) needs to be sent anyway. This, coupled
1164	 * with Auto-CMD23 enhancements provided by some
1165	 * hosts, means that the complexity of dealing
1166	 * with this is best left to the host. If CMD23 is
1167	 * supported by card and host, we'll fill sbc in and let
1168	 * the host deal with handling it correctly. This means
1169	 * that for hosts that don't expose MMC_CAP_CMD23, no
1170	 * change of behavior will be observed.
1171	 *
1172	 * N.B.: Some MMC cards experience performance degradation.
1173	 * We'll avoid using CMD23-bounded multiblock writes for
1174	 * these, while retaining features like reliable writes.
1175	 */
1176
1177	if ((md->flags & MMC_BLK_CMD23) &&
1178	    mmc_op_multi(brq->cmd.opcode) &&
1179	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
1180		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1181		brq->sbc.arg = brq->data.blocks |
1182			(do_rel_wr ? (1 << 31) : 0);
1183		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1184		brq->mrq.sbc = &brq->sbc;
1185	}
1186
1187	mmc_set_data_timeout(&brq->data, card);
1188
1189	brq->data.sg = mqrq->sg;
1190	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1191
1192	/*
1193	 * Adjust the sg list so it is the same size as the
1194	 * request.
1195	 */
1196	if (brq->data.blocks != blk_rq_sectors(req)) {
1197		int i, data_size = brq->data.blocks << 9;
1198		struct scatterlist *sg;
1199
1200		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1201			data_size -= sg->length;
1202			if (data_size <= 0) {
1203				sg->length += data_size;
1204				i++;
1205				break;
1206			}
1207		}
1208		brq->data.sg_len = i;
1209	}
1210
1211	mqrq->mmc_active.mrq = &brq->mrq;
1212	mqrq->mmc_active.err_check = mmc_blk_err_check;
1213
1214	mmc_queue_bounce_pre(mqrq);
1215}
1216
1217static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1218			   struct mmc_blk_request *brq, struct request *req,
1219			   int ret)
1220{
1221	/*
1222	 * If this is an SD card and we're writing, we can first
1223	 * mark the known good sectors as ok.
1224	 *
1225	 * If the card is not SD, we can still acknowledge the sectors
1226	 * reported as written by the controller (which might be less than
1227	 * the real number of written sectors, but never more).
1228	 */
1229	if (mmc_card_sd(card)) {
1230		u32 blocks;
1231
1232		blocks = mmc_sd_num_wr_blocks(card);
1233		if (blocks != (u32)-1) {
1234			spin_lock_irq(&md->lock);
1235			ret = __blk_end_request(req, 0, blocks << 9);
1236			spin_unlock_irq(&md->lock);
1237		}
1238	} else {
1239		spin_lock_irq(&md->lock);
1240		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
1241		spin_unlock_irq(&md->lock);
1242	}
1243	return ret;
1244}
1245
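/*
 * Issue read/write requests using the asynchronous request interface:
 * mmc_start_req() hands the newly prepared request to the host and returns
 * the previously started one once it completes, so preparation of the next
 * transfer overlaps with the ongoing one.  The switch below handles the
 * status of each completed request, retrying or aborting as needed.
 */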
1246static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1247{
1248	struct mmc_blk_data *md = mq->data;
1249	struct mmc_card *card = md->queue.card;
1250	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1251	int ret = 1, disable_multi = 0, retry = 0, type;
1252	enum mmc_blk_status status;
1253	struct mmc_queue_req *mq_rq;
1254	struct request *req;
1255	struct mmc_async_req *areq;
1256
1257	if (!rqc && !mq->mqrq_prev->req)
1258		return 0;
1259
1260	do {
1261		if (rqc) {
1262			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1263			areq = &mq->mqrq_cur->mmc_active;
1264		} else
1265			areq = NULL;
1266		areq = mmc_start_req(card->host, areq, (int *) &status);
1267		if (!areq)
1268			return 0;
1269
1270		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1271		brq = &mq_rq->brq;
1272		req = mq_rq->req;
1273		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1274		mmc_queue_bounce_post(mq_rq);
1275
1276		switch (status) {
1277		case MMC_BLK_SUCCESS:
1278		case MMC_BLK_PARTIAL:
1279			/*
1280			 * A block was successfully transferred.
1281			 */
1282			mmc_blk_reset_success(md, type);
1283			spin_lock_irq(&md->lock);
1284			ret = __blk_end_request(req, 0,
1285						brq->data.bytes_xfered);
1286			spin_unlock_irq(&md->lock);
1287			/*
1288			 * If the blk_end_request function returns non-zero even
1289			 * though all data has been transferred and no errors
1290			 * were returned by the host controller, it's a bug.
1291			 */
1292			if (status == MMC_BLK_SUCCESS && ret) {
1293				pr_err("%s BUG rq_tot %d d_xfer %d\n",
1294				       __func__, blk_rq_bytes(req),
1295				       brq->data.bytes_xfered);
1296				rqc = NULL;
1297				goto cmd_abort;
1298			}
1299			break;
1300		case MMC_BLK_CMD_ERR:
1301			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1302			if (!mmc_blk_reset(md, card->host, type))
1303				break;
1304			goto cmd_abort;
1305		case MMC_BLK_RETRY:
1306			if (retry++ < 5)
1307				break;
1308			/* Fall through */
1309		case MMC_BLK_ABORT:
1310			if (!mmc_blk_reset(md, card->host, type))
1311				break;
1312			goto cmd_abort;
1313		case MMC_BLK_DATA_ERR: {
1314			int err;
1315
1316			err = mmc_blk_reset(md, card->host, type);
1317			if (!err)
1318				break;
1319			if (err == -ENODEV)
1320				goto cmd_abort;
1321			/* Fall through */
1322		}
1323		case MMC_BLK_ECC_ERR:
1324			if (brq->data.blocks > 1) {
1325				/* Redo read one sector at a time */
1326				pr_warning("%s: retrying using single block read\n",
1327					   req->rq_disk->disk_name);
1328				disable_multi = 1;
1329				break;
1330			}
1331			/*
1332			 * After an error, we redo I/O one sector at a
1333			 * time, so we only reach here after trying to
1334			 * read a single sector.
1335			 */
1336			spin_lock_irq(&md->lock);
1337			ret = __blk_end_request(req, -EIO,
1338						brq->data.blksz);
1339			spin_unlock_irq(&md->lock);
1340			if (!ret)
1341				goto start_new_req;
1342			break;
1343		case MMC_BLK_NOMEDIUM:
1344			goto cmd_abort;
1345		}
1346
1347		if (ret) {
1348			/*
1349			 * In case of an incomplete request,
1350			 * prepare it again and resend.
1351			 */
1352			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
1353			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1354		}
1355	} while (ret);
1356
1357	return 1;
1358
1359 cmd_abort:
1360	spin_lock_irq(&md->lock);
1361	if (mmc_card_removed(card))
1362		req->cmd_flags |= REQ_QUIET;
1363	while (ret)
1364		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
1365	spin_unlock_irq(&md->lock);
1366
1367 start_new_req:
1368	if (rqc) {
1369		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1370		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
1371	}
1372
1373	return 0;
1374}
1375
1376static int
1377mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
1378
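/*
 * Entry point used as the queue's issue_fn: dispatches discard, secure
 * discard, flush and regular read/write requests, claiming the host for the
 * first request in a burst and releasing it once the queue drains.
 */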
1379static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1380{
1381	int ret;
1382	struct mmc_blk_data *md = mq->data;
1383	struct mmc_card *card = md->queue.card;
1384
1385#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1386	if (mmc_bus_needs_resume(card->host)) {
1387		mmc_resume_bus(card->host);
1388		mmc_blk_set_blksize(md, card);
1389	}
1390#endif
1391
1392	if (req && !mq->mqrq_prev->req)
1393		/* claim host only for the first request */
1394		mmc_claim_host(card->host);
1395
1396	ret = mmc_blk_part_switch(card, md);
1397	if (ret) {
1398		if (req) {
1399			spin_lock_irq(&md->lock);
1400			__blk_end_request_all(req, -EIO);
1401			spin_unlock_irq(&md->lock);
1402		}
1403		ret = 0;
1404		goto out;
1405	}
1406
1407	if (req && req->cmd_flags & REQ_DISCARD) {
1408		/* complete ongoing async transfer before issuing discard */
1409		if (card->host->areq)
1410			mmc_blk_issue_rw_rq(mq, NULL);
1411		if (req->cmd_flags & REQ_SECURE)
1412			ret = mmc_blk_issue_secdiscard_rq(mq, req);
1413		else
1414			ret = mmc_blk_issue_discard_rq(mq, req);
1415	} else if (req && req->cmd_flags & REQ_FLUSH) {
1416		/* complete ongoing async transfer before issuing flush */
1417		if (card->host->areq)
1418			mmc_blk_issue_rw_rq(mq, NULL);
1419		ret = mmc_blk_issue_flush(mq, req);
1420	} else {
1421		ret = mmc_blk_issue_rw_rq(mq, req);
1422	}
1423
1424out:
1425	if (!req)
1426		/* release host only when there are no more requests */
1427		mmc_release_host(card->host);
1428	return ret;
1429}
1430
1431static inline int mmc_blk_readonly(struct mmc_card *card)
1432{
1433	return mmc_card_readonly(card) ||
1434	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
1435}
1436
1437static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1438					      struct device *parent,
1439					      sector_t size,
1440					      bool default_ro,
1441					      const char *subname,
1442					      int area_type)
1443{
1444	struct mmc_blk_data *md;
1445	int devidx, ret;
1446
1447	devidx = find_first_zero_bit(dev_use, max_devices);
1448	if (devidx >= max_devices)
1449		return ERR_PTR(-ENOSPC);
1450	__set_bit(devidx, dev_use);
1451
1452	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
1453	if (!md) {
1454		ret = -ENOMEM;
1455		goto out;
1456	}
1457
1458	/*
1459	 * !subname implies we are creating the main mmc_blk_data that will be
1460	 * associated with the mmc_card via mmc_set_drvdata. Due to device
1461	 * partitions, devidx will not coincide with a per-physical-card
1462	 * index anymore, so we keep track of a separate name index.
1463	 */
1464	if (!subname) {
1465		md->name_idx = find_first_zero_bit(name_use, max_devices);
1466		__set_bit(md->name_idx, name_use);
1467	} else
1468		md->name_idx = ((struct mmc_blk_data *)
1469				dev_to_disk(parent)->private_data)->name_idx;
1470
1471	md->area_type = area_type;
1472
1473	/*
1474	 * Set the read-only status based on the supported commands
1475	 * and the write protect switch.
1476	 */
1477	md->read_only = mmc_blk_readonly(card);
1478
1479	md->disk = alloc_disk(perdev_minors);
1480	if (md->disk == NULL) {
1481		ret = -ENOMEM;
1482		goto err_kfree;
1483	}
1484
1485	spin_lock_init(&md->lock);
1486	INIT_LIST_HEAD(&md->part);
1487	md->usage = 1;
1488
1489	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
1490	if (ret)
1491		goto err_putdisk;
1492
1493	md->queue.issue_fn = mmc_blk_issue_rq;
1494	md->queue.data = md;
1495
1496	md->disk->major	= MMC_BLOCK_MAJOR;
1497	md->disk->first_minor = devidx * perdev_minors;
1498	md->disk->fops = &mmc_bdops;
1499	md->disk->private_data = md;
1500	md->disk->queue = md->queue.queue;
1501	md->disk->driverfs_dev = parent;
1502	set_disk_ro(md->disk, md->read_only || default_ro);
1503	md->disk->flags = GENHD_FL_EXT_DEVT;
1504
1505	/*
1506	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
1507	 *
1508	 * - be set for removable media with permanent block devices
1509	 * - be unset for removable block devices with permanent media
1510	 *
1511	 * Since MMC block devices clearly fall under the second
1512	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
1513	 * should use the block device creation/destruction hotplug
1514	 * messages to tell when the card is present.
1515	 */
1516
1517	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1518		 "mmcblk%d%s", md->name_idx, subname ? subname : "");
1519
1520	blk_queue_logical_block_size(md->queue.queue, 512);
1521	set_capacity(md->disk, size);
1522
1523	if (mmc_host_cmd23(card->host)) {
1524		if (mmc_card_mmc(card) ||
1525		    (mmc_card_sd(card) &&
1526		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1527			md->flags |= MMC_BLK_CMD23;
1528	}
1529
1530	if (mmc_card_mmc(card) &&
1531	    md->flags & MMC_BLK_CMD23 &&
1532	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1533	     card->ext_csd.rel_sectors)) {
1534		md->flags |= MMC_BLK_REL_WR;
1535		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1536	}
1537
1538	return md;
1539
1540 err_putdisk:
1541	put_disk(md->disk);
1542 err_kfree:
1543	kfree(md);
1544 out:
1545	return ERR_PTR(ret);
1546}
1547
1548static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1549{
1550	sector_t size;
1551	struct mmc_blk_data *md;
1552
1553	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
1554		/*
1555		 * The EXT_CSD sector count is in number of 512 byte
1556		 * sectors.
1557		 */
1558		size = card->ext_csd.sectors;
1559	} else {
1560		/*
1561		 * The CSD capacity field is in units of read_blkbits.
1562		 * set_capacity takes units of 512 bytes.
1563		 */
1564		size = card->csd.capacity << (card->csd.read_blkbits - 9);
1565	}
1566
1567	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
1568					MMC_BLK_DATA_AREA_MAIN);
1569	return md;
1570}
1571
1572static int mmc_blk_alloc_part(struct mmc_card *card,
1573			      struct mmc_blk_data *md,
1574			      unsigned int part_type,
1575			      sector_t size,
1576			      bool default_ro,
1577			      const char *subname,
1578			      int area_type)
1579{
1580	char cap_str[10];
1581	struct mmc_blk_data *part_md;
1582
1583	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1584				    subname, area_type);
1585	if (IS_ERR(part_md))
1586		return PTR_ERR(part_md);
1587	part_md->part_type = part_type;
1588	list_add(&part_md->part, &md->part);
1589
1590	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1591			cap_str, sizeof(cap_str));
1592	pr_info("%s: %s %s partition %u %s\n",
1593	       part_md->disk->disk_name, mmc_card_id(card),
1594	       mmc_card_name(card), part_md->part_type, cap_str);
1595	return 0;
1596}
1597
1598/* MMC Physical partitions consist of two boot partitions and
1599 * up to four general purpose partitions.
1600 * For each partition enabled in EXT_CSD a block device will be allocated
1601 * to provide access to the partition.
1602 */
1603
1604static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1605{
1606	int idx, ret = 0;
1607
1608	if (!mmc_card_mmc(card))
1609		return 0;
1610
1611	for (idx = 0; idx < card->nr_parts; idx++) {
1612		if (card->part[idx].size) {
1613			ret = mmc_blk_alloc_part(card, md,
1614				card->part[idx].part_cfg,
1615				card->part[idx].size >> 9,
1616				card->part[idx].force_ro,
1617				card->part[idx].name,
1618				card->part[idx].area_type);
1619			if (ret)
1620				return ret;
1621		}
1622	}
1623
1624	return ret;
1625}
1626
1627static int
1628mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
1629{
1630	int err;
1631
1632	mmc_claim_host(card->host);
1633	err = mmc_set_blocklen(card, 512);
1634	mmc_release_host(card->host);
1635
1636	if (err) {
1637		pr_err("%s: unable to set block size to 512: %d\n",
1638			md->disk->disk_name, err);
1639		return -EINVAL;
1640	}
1641
1642	return 0;
1643}
1644
1645static void mmc_blk_remove_req(struct mmc_blk_data *md)
1646{
1647	struct mmc_card *card;
1648
1649	if (md) {
1650		card = md->queue.card;
1651		if (md->disk->flags & GENHD_FL_UP) {
1652			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1653			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
1654					card->ext_csd.boot_ro_lockable)
1655				device_remove_file(disk_to_dev(md->disk),
1656					&md->power_ro_lock);
1657
1658			/* Stop new requests from getting into the queue */
1659			del_gendisk(md->disk);
1660		}
1661
1662		/* Then flush out any already in there */
1663		mmc_cleanup_queue(&md->queue);
1664		mmc_blk_put(md);
1665	}
1666}
1667
1668static void mmc_blk_remove_parts(struct mmc_card *card,
1669				 struct mmc_blk_data *md)
1670{
1671	struct list_head *pos, *q;
1672	struct mmc_blk_data *part_md;
1673
1674	__clear_bit(md->name_idx, name_use);
1675	list_for_each_safe(pos, q, &md->part) {
1676		part_md = list_entry(pos, struct mmc_blk_data, part);
1677		list_del(pos);
1678		mmc_blk_remove_req(part_md);
1679	}
1680}
1681
1682static int mmc_add_disk(struct mmc_blk_data *md)
1683{
1684	int ret;
1685	struct mmc_card *card = md->queue.card;
1686
1687	add_disk(md->disk);
1688	md->force_ro.show = force_ro_show;
1689	md->force_ro.store = force_ro_store;
1690	sysfs_attr_init(&md->force_ro.attr);
1691	md->force_ro.attr.name = "force_ro";
1692	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1693	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1694	if (ret)
1695		goto force_ro_fail;
1696
1697	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
1698	     card->ext_csd.boot_ro_lockable) {
1699		mode_t mode;
1700
1701		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
1702			mode = S_IRUGO;
1703		else
1704			mode = S_IRUGO | S_IWUSR;
1705
1706		md->power_ro_lock.show = power_ro_lock_show;
1707		md->power_ro_lock.store = power_ro_lock_store;
1708		sysfs_attr_init(&md->power_ro_lock.attr);
1709		md->power_ro_lock.attr.mode = mode;
1710		md->power_ro_lock.attr.name =
1711					"ro_lock_until_next_power_on";
1712		ret = device_create_file(disk_to_dev(md->disk),
1713				&md->power_ro_lock);
1714		if (ret)
1715			goto power_ro_lock_fail;
1716	}
1717	return ret;
1718
1719power_ro_lock_fail:
1720	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1721force_ro_fail:
1722	del_gendisk(md->disk);
1723
1724	return ret;
1725}
1726
1727#define CID_MANFID_SANDISK	0x2
1728#define CID_MANFID_TOSHIBA	0x11
1729#define CID_MANFID_MICRON	0x13
1730
1731static const struct mmc_fixup blk_fixups[] =
1732{
1733	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
1734		  MMC_QUIRK_INAND_CMD38),
1735	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
1736		  MMC_QUIRK_INAND_CMD38),
1737	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
1738		  MMC_QUIRK_INAND_CMD38),
1739	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
1740		  MMC_QUIRK_INAND_CMD38),
1741	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
1742		  MMC_QUIRK_INAND_CMD38),
1743
1744	/*
1745	 * Some MMC cards experience performance degradation with CMD23
1746	 * instead of CMD12-bounded multiblock transfers. For now we'll
1747	 * blacklist what's bad...
1748	 * - Certain Toshiba cards.
1749	 *
1750	 * N.B. This doesn't affect SD cards.
1751	 */
1752	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1753		  MMC_QUIRK_BLK_NO_CMD23),
1754	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1755		  MMC_QUIRK_BLK_NO_CMD23),
1756	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1757		  MMC_QUIRK_BLK_NO_CMD23),
1758
1759	/*
1760	 * Some Micron MMC cards need a longer data read timeout than
1761	 * indicated in CSD.
1762	 */
1763	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
1764		  MMC_QUIRK_LONG_READ_TIME),
1765
1766	END_FIXUP
1767};
1768
1769static int mmc_blk_probe(struct mmc_card *card)
1770{
1771	struct mmc_blk_data *md, *part_md;
1772	int err;
1773	char cap_str[10];
1774
1775	/*
1776	 * Check that the card supports the command class(es) we need.
1777	 */
1778	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1779		return -ENODEV;
1780
1781	md = mmc_blk_alloc(card);
1782	if (IS_ERR(md))
1783		return PTR_ERR(md);
1784
1785	err = mmc_blk_set_blksize(md, card);
1786	if (err)
1787		goto out;
1788
1789	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
1790			cap_str, sizeof(cap_str));
1791	pr_info("%s: %s %s %s %s\n",
1792		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
1793		cap_str, md->read_only ? "(ro)" : "");
1794
1795	if (mmc_blk_alloc_parts(card, md))
1796		goto out;
1797
1798	mmc_set_drvdata(card, md);
1799	mmc_fixup_device(card, blk_fixups);
1800
1801#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1802	mmc_set_bus_resume_policy(card->host, 1);
1803#endif
1804	if (mmc_add_disk(md))
1805		goto out;
1806
1807	list_for_each_entry(part_md, &md->part, part) {
1808		if (mmc_add_disk(part_md))
1809			goto out;
1810	}
1811	return 0;
1812
1813 out:
1814	mmc_blk_remove_parts(card, md);
1815	mmc_blk_remove_req(md);
1816	return err;
1817}
1818
1819static void mmc_blk_remove(struct mmc_card *card)
1820{
1821	struct mmc_blk_data *md = mmc_get_drvdata(card);
1822
1823	mmc_blk_remove_parts(card, md);
1824	mmc_claim_host(card->host);
1825	mmc_blk_part_switch(card, md);
1826	mmc_release_host(card->host);
1827	mmc_blk_remove_req(md);
1828	mmc_set_drvdata(card, NULL);
1829#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1830	mmc_set_bus_resume_policy(card->host, 0);
1831#endif
1832}
1833
1834#ifdef CONFIG_PM
1835static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
1836{
1837	struct mmc_blk_data *part_md;
1838	struct mmc_blk_data *md = mmc_get_drvdata(card);
1839
1840	if (md) {
1841		mmc_queue_suspend(&md->queue);
1842		list_for_each_entry(part_md, &md->part, part) {
1843			mmc_queue_suspend(&part_md->queue);
1844		}
1845	}
1846	return 0;
1847}
1848
1849static int mmc_blk_resume(struct mmc_card *card)
1850{
1851	struct mmc_blk_data *part_md;
1852	struct mmc_blk_data *md = mmc_get_drvdata(card);
1853
1854	if (md) {
1855#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1856		mmc_blk_set_blksize(md, card);
1857#endif
1858
1859		/*
1860		 * Resume involves the card going into idle state,
1861		 * so the current partition is always the main one.
1862		 */
1863		md->part_curr = md->part_type;
1864		mmc_queue_resume(&md->queue);
1865		list_for_each_entry(part_md, &md->part, part) {
1866			mmc_queue_resume(&part_md->queue);
1867		}
1868	}
1869	return 0;
1870}
1871#else
1872#define	mmc_blk_suspend	NULL
1873#define mmc_blk_resume	NULL
1874#endif
1875
1876static struct mmc_driver mmc_driver = {
1877	.drv		= {
1878		.name	= "mmcblk",
1879	},
1880	.probe		= mmc_blk_probe,
1881	.remove		= mmc_blk_remove,
1882	.suspend	= mmc_blk_suspend,
1883	.resume		= mmc_blk_resume,
1884};
1885
1886static int __init mmc_blk_init(void)
1887{
1888	int res;
1889
1890	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
1891		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
1892
1893	max_devices = 256 / perdev_minors;
1894
1895	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
1896	if (res)
1897		goto out;
1898
1899	res = mmc_register_driver(&mmc_driver);
1900	if (res)
1901		goto out2;
1902
1903	return 0;
1904 out2:
1905	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1906 out:
1907	return res;
1908}
1909
1910static void __exit mmc_blk_exit(void)
1911{
1912	mmc_unregister_driver(&mmc_driver);
1913	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1914}
1915
1916module_init(mmc_blk_init);
1917module_exit(mmc_blk_exit);
1918
1919MODULE_LICENSE("GPL");
1920MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");