block.c revision 9d4e98e9609bc19d4a8ac4a5c3218358d1820114
1/*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2008 Pierre Ossman
6 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author:  Andrew Christian
18 *          28 May 2002
19 */
20#include <linux/moduleparam.h>
21#include <linux/module.h>
22#include <linux/init.h>
23
24#include <linux/kernel.h>
25#include <linux/fs.h>
26#include <linux/errno.h>
27#include <linux/hdreg.h>
28#include <linux/kdev_t.h>
29#include <linux/blkdev.h>
30#include <linux/mutex.h>
31#include <linux/scatterlist.h>
32
33#include <linux/mmc/card.h>
34#include <linux/mmc/host.h>
35#include <linux/mmc/mmc.h>
36#include <linux/mmc/sd.h>
37
38#include <asm/system.h>
39#include <asm/uaccess.h>
40
41#include "queue.h"
42
/*
 * max 8 partitions per card
 */
#define MMC_SHIFT	3
#define MMC_NUM_MINORS	(256 >> MMC_SHIFT)

/* Bitmap of device indices (minor-number ranges) currently in use. */
static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;		/* protects request completion */
	struct gendisk	*disk;
	struct mmc_queue queue;

	unsigned int	usage;		/* refcount, guarded by open_lock */
	unsigned int	block_bits;	/* log2 of the block size in bytes */
	unsigned int	read_only;	/* non-zero if writes are disallowed */
};

/* Serializes usage refcounting and dev_use bitmap updates. */
static DEFINE_MUTEX(open_lock);
65
66static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
67{
68	struct mmc_blk_data *md;
69
70	mutex_lock(&open_lock);
71	md = disk->private_data;
72	if (md && md->usage == 0)
73		md = NULL;
74	if (md)
75		md->usage++;
76	mutex_unlock(&open_lock);
77
78	return md;
79}
80
81static void mmc_blk_put(struct mmc_blk_data *md)
82{
83	mutex_lock(&open_lock);
84	md->usage--;
85	if (md->usage == 0) {
86		int devidx = md->disk->first_minor >> MMC_SHIFT;
87		__clear_bit(devidx, dev_use);
88
89		put_disk(md->disk);
90		kfree(md);
91	}
92	mutex_unlock(&open_lock);
93}
94
95static int mmc_blk_open(struct inode *inode, struct file *filp)
96{
97	struct mmc_blk_data *md;
98	int ret = -ENXIO;
99
100	md = mmc_blk_get(inode->i_bdev->bd_disk);
101	if (md) {
102		if (md->usage == 2)
103			check_disk_change(inode->i_bdev);
104		ret = 0;
105
106		if ((filp->f_mode & FMODE_WRITE) && md->read_only)
107			ret = -EROFS;
108	}
109
110	return ret;
111}
112
113static int mmc_blk_release(struct inode *inode, struct file *filp)
114{
115	struct mmc_blk_data *md = inode->i_bdev->bd_disk->private_data;
116
117	mmc_blk_put(md);
118	return 0;
119}
120
121static int
122mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
123{
124	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
125	geo->heads = 4;
126	geo->sectors = 16;
127	return 0;
128}
129
/* Operations exported to the block layer for mmcblk devices. */
static struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};
136
/*
 * Everything needed to issue one data request to the MMC core;
 * allocated on the stack in mmc_blk_issue_rq().
 */
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;	/* the read/write command */
	struct mmc_command	stop;	/* STOP_TRANSMISSION, multiblock only */
	struct mmc_data		data;
};
143
/*
 * Ask an SD card how many blocks of the last write completed
 * successfully, using ACMD22 (SEND_NUM_WR_BLKS).
 *
 * Returns the block count, or (u32)-1 on any failure.
 */
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	/* APP_CMD prefix: the next command is application-specific. */
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	/* SPI responses carry no APP_CMD status bit to verify. */
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

	/* Generous timeout: 100x the card's stated access time. */
	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	/* Cap the total timeout at 100 ms. */
	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	/* The card replies with a single 4-byte count. */
	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

	/* Receive the count directly into the local 'blocks'. */
	sg_init_one(&sg, &blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error || data.error)
		return (u32)-1;

	/* The count arrives big-endian on the wire. */
	blocks = ntohl(blocks);

	return blocks;
}
210
/*
 * Service one block-layer request: translate it into MMC read/write
 * commands, issue them, and complete the transferred sectors.
 *
 * Returns 1 when the whole request completed successfully, 0 on
 * failure (the unfinished remainder is errored out with -EIO).
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, data_size, i;
	struct scatterlist *sg;

	mmc_claim_host(card->host);

	/* Loop: each pass transfers as much of the request as the host
	 * allows; __blk_end_request() tells us whether work remains. */
	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		/* Byte-addressed cards take the offset in bytes. */
		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		/* Clamp to what the host can transfer in one command. */
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		if (brq.data.blocks > 1) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request.
			 */
			if (!mmc_host_is_spi(card->host)
					|| rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		mmc_queue_bounce_pre(mq);

		/*
		 * Adjust the sg list so it is the same size as the
		 * request.
		 */
		if (brq.data.blocks !=
		    (req->nr_sectors >> (md->block_bits - 9))) {
			data_size = brq.data.blocks * brq.data.blksz;
			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					/* Trim the last entry to fit exactly. */
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);

		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
		}

		/* After a write (non-SPI only), poll CMD13 until the card
		 * leaves programming mode (state 7). */
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				(R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		if (brq.cmd.error || brq.data.error || brq.stop.error)
			goto cmd_err;

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_release_host(card->host);

	return 1;

 cmd_err:
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
 	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
	if (rq_data_dir(req) != READ) {
		if (mmc_card_sd(card)) {
			u32 blocks;
			unsigned int bytes;

			blocks = mmc_sd_num_wr_blocks(card);
			if (blocks != (u32)-1) {
				if (card->csd.write_partial)
					bytes = blocks << md->block_bits;
				else
					bytes = blocks << 9;
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, 0, bytes);
				spin_unlock_irq(&md->lock);
			}
		} else {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
			spin_unlock_irq(&md->lock);
		}
	}

	mmc_release_host(card->host);

	/* Fail whatever is left of the request with -EIO. */
	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}
401
402
403static inline int mmc_blk_readonly(struct mmc_card *card)
404{
405	return mmc_card_readonly(card) ||
406	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
407}
408
409static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
410{
411	struct mmc_blk_data *md;
412	int devidx, ret;
413
414	devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
415	if (devidx >= MMC_NUM_MINORS)
416		return ERR_PTR(-ENOSPC);
417	__set_bit(devidx, dev_use);
418
419	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
420	if (!md) {
421		ret = -ENOMEM;
422		goto out;
423	}
424
425
426	/*
427	 * Set the read-only status based on the supported commands
428	 * and the write protect switch.
429	 */
430	md->read_only = mmc_blk_readonly(card);
431
432	/*
433	 * Both SD and MMC specifications state (although a bit
434	 * unclearly in the MMC case) that a block size of 512
435	 * bytes must always be supported by the card.
436	 */
437	md->block_bits = 9;
438
439	md->disk = alloc_disk(1 << MMC_SHIFT);
440	if (md->disk == NULL) {
441		ret = -ENOMEM;
442		goto err_kfree;
443	}
444
445	spin_lock_init(&md->lock);
446	md->usage = 1;
447
448	ret = mmc_init_queue(&md->queue, card, &md->lock);
449	if (ret)
450		goto err_putdisk;
451
452	md->queue.issue_fn = mmc_blk_issue_rq;
453	md->queue.data = md;
454
455	md->disk->major	= MMC_BLOCK_MAJOR;
456	md->disk->first_minor = devidx << MMC_SHIFT;
457	md->disk->fops = &mmc_bdops;
458	md->disk->private_data = md;
459	md->disk->queue = md->queue.queue;
460	md->disk->driverfs_dev = &card->dev;
461
462	/*
463	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
464	 *
465	 * - be set for removable media with permanent block devices
466	 * - be unset for removable block devices with permanent media
467	 *
468	 * Since MMC block devices clearly fall under the second
469	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
470	 * should use the block device creation/destruction hotplug
471	 * messages to tell when the card is present.
472	 */
473
474	sprintf(md->disk->disk_name, "mmcblk%d", devidx);
475
476	blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
477
478	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
479		/*
480		 * The EXT_CSD sector count is in number or 512 byte
481		 * sectors.
482		 */
483		set_capacity(md->disk, card->ext_csd.sectors);
484	} else {
485		/*
486		 * The CSD capacity field is in units of read_blkbits.
487		 * set_capacity takes units of 512 bytes.
488		 */
489		set_capacity(md->disk,
490			card->csd.capacity << (card->csd.read_blkbits - 9));
491	}
492	return md;
493
494 err_putdisk:
495	put_disk(md->disk);
496 err_kfree:
497	kfree(md);
498 out:
499	return ERR_PTR(ret);
500}
501
502static int
503mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
504{
505	struct mmc_command cmd;
506	int err;
507
508	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
509	if (mmc_card_blockaddr(card))
510		return 0;
511
512	mmc_claim_host(card->host);
513	cmd.opcode = MMC_SET_BLOCKLEN;
514	cmd.arg = 1 << md->block_bits;
515	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
516	err = mmc_wait_for_cmd(card->host, &cmd, 5);
517	mmc_release_host(card->host);
518
519	if (err) {
520		printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
521			md->disk->disk_name, cmd.arg, err);
522		return -EINVAL;
523	}
524
525	return 0;
526}
527
528static int mmc_blk_probe(struct mmc_card *card)
529{
530	struct mmc_blk_data *md;
531	int err;
532
533	/*
534	 * Check that the card supports the command class(es) we need.
535	 */
536	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
537		return -ENODEV;
538
539	md = mmc_blk_alloc(card);
540	if (IS_ERR(md))
541		return PTR_ERR(md);
542
543	err = mmc_blk_set_blksize(md, card);
544	if (err)
545		goto out;
546
547	printk(KERN_INFO "%s: %s %s %lluKiB %s\n",
548		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
549		(unsigned long long)(get_capacity(md->disk) >> 1),
550		md->read_only ? "(ro)" : "");
551
552	mmc_set_drvdata(card, md);
553	add_disk(md->disk);
554	return 0;
555
556 out:
557	mmc_blk_put(md);
558
559	return err;
560}
561
562static void mmc_blk_remove(struct mmc_card *card)
563{
564	struct mmc_blk_data *md = mmc_get_drvdata(card);
565
566	if (md) {
567		/* Stop new requests from getting into the queue */
568		del_gendisk(md->disk);
569
570		/* Then flush out any already in there */
571		mmc_cleanup_queue(&md->queue);
572
573		mmc_blk_put(md);
574	}
575	mmc_set_drvdata(card, NULL);
576}
577
578#ifdef CONFIG_PM
579static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
580{
581	struct mmc_blk_data *md = mmc_get_drvdata(card);
582
583	if (md) {
584		mmc_queue_suspend(&md->queue);
585	}
586	return 0;
587}
588
589static int mmc_blk_resume(struct mmc_card *card)
590{
591	struct mmc_blk_data *md = mmc_get_drvdata(card);
592
593	if (md) {
594		mmc_blk_set_blksize(md, card);
595		mmc_queue_resume(&md->queue);
596	}
597	return 0;
598}
599#else
600#define	mmc_blk_suspend	NULL
601#define mmc_blk_resume	NULL
602#endif
603
/* Bus-level driver glue: binds this block driver to MMC/SD cards. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
613
614static int __init mmc_blk_init(void)
615{
616	int res;
617
618	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
619	if (res)
620		goto out;
621
622	res = mmc_register_driver(&mmc_driver);
623	if (res)
624		goto out2;
625
626	return 0;
627 out2:
628	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
629 out:
630	return res;
631}
632
633static void __exit mmc_blk_exit(void)
634{
635	mmc_unregister_driver(&mmc_driver);
636	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
637}
638
639module_init(mmc_blk_init);
640module_exit(mmc_blk_exit);
641
642MODULE_LICENSE("GPL");
643MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
644
645