block.c revision 17b0429dde9ab60f9cee8e07ab28c7dc6cfe6efd
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2007 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

/*
 * max 8 partitions per card
 */
#define MMC_SHIFT	3
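
/*
 * A note on minor numbering (derived from MMC_SHIFT and its users
 * below): each card gets 1 << MMC_SHIFT == 8 minors, i.e. the whole
 * device plus up to seven partitions, so a 256-minor major supports
 * 32 cards.  The card at devidx 2, for example, owns minors 16-23
 * and shows up as mmcblk2.
 */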

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;		/* queue lock, also guards completion */
	struct gendisk	*disk;
	struct mmc_queue queue;

	unsigned int	usage;		/* refcount, guarded by open_lock */
	unsigned int	block_bits;	/* log2 of the card block size */
	unsigned int	read_only;
};

static DEFINE_MUTEX(open_lock);	/* serializes updates of md->usage */

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static int mmc_blk_open(struct inode *inode, struct file *filp)
{
	struct mmc_blk_data *md;
	int ret = -ENXIO;

	md = mmc_blk_get(inode->i_bdev->bd_disk);
	if (md) {
		/*
		 * usage is one from mmc_blk_alloc() plus one from the
		 * get above, so two means this is the first open.
		 */
		if (md->usage == 2)
			check_disk_change(inode->i_bdev);
		ret = 0;

		if ((filp->f_mode & FMODE_WRITE) && md->read_only)
			ret = -EROFS;
	}

	return ret;
}

static int mmc_blk_release(struct inode *inode, struct file *filp)
{
	struct mmc_blk_data *md = inode->i_bdev->bd_disk->private_data;

	mmc_blk_put(md);
	return 0;
}

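/*
 * MMC/SD media has no physical CHS geometry; report a fixed 4-head,
 * 16-sector layout so legacy partitioning tools have something
 * consistent to work with.
 */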
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

static struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};

/*
 * Everything needed for one block-layer request: the data command,
 * the data transfer itself, and an optional stop command for
 * multi-block transfers.
 */
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};

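/*
 * Ask an SD card how many blocks of the preceding write it actually
 * programmed (ACMD22, SD_APP_SEND_NUM_WR_BLKS).  Returns the block
 * count, or (u32)-1 on any failure.
 */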
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err || !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

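	/*
	 * Scale the card's nominal access time by a factor of 100,
	 * clamping the result to at most 100 ms below.
	 */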
	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

	sg_init_one(&sg, &blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error || data.error)
		return (u32)-1;

	/* The card returns the count as a big-endian 32-bit word. */
	blocks = ntohl(blocks);

	return blocks;
}

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, sg_pos, data_size;

	mmc_claim_host(card->host);

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;	/* byte addressing */
		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;
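		/*
		 * If the transfer was truncated to max_blk_count here,
		 * the remainder of the request is picked up by the
		 * next pass of the enclosing loop.
		 */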

		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);

		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			brq.data.flags |= MMC_DATA_MULTI;
			brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		mmc_queue_bounce_pre(mq);

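		/*
		 * brq.data.blocks may have been reduced above, either by
		 * max_blk_count or by the single-block write fallback;
		 * if so, trim the scatterlist so that it describes no
		 * more data than the command will transfer.
		 */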
		if (brq.data.blocks !=
		    (req->nr_sectors >> (md->block_bits - 9))) {
			data_size = brq.data.blocks * brq.data.blksz;
			for (sg_pos = 0; sg_pos < brq.data.sg_len; sg_pos++) {
				data_size -= mq->sg[sg_pos].length;
				if (data_size <= 0) {
					mq->sg[sg_pos].length += data_size;
					sg_pos++;
					break;
				}
			}
			brq.data.sg_len = sg_pos;
		}

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);

		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		if (rq_data_dir(req) != READ) {
			/*
			 * Poll the card until it has finished programming
			 * the written data and is ready for more.
			 */
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_release_host(card->host);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first mark the
	 * known good sectors as completed.
	 *
	 * If the card is not SD, we can still mark written sectors as
	 * completed if the controller does proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	mmc_release_host(card->host);

	/* Fail whatever is left of the request. */
	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}

#define MMC_NUM_MINORS	(256 >> MMC_SHIFT)

/* One bit per device index, set while that index's minors are in use. */
static unsigned long dev_use[MMC_NUM_MINORS/(8*sizeof(unsigned long))];

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
	if (devidx >= MMC_NUM_MINORS)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	/*
	 * Both SD and MMC specifications state (although a bit
	 * unclearly in the MMC case) that a block size of 512
	 * bytes must always be supported by the card.
	 */
	md->block_bits = 9;

	md->disk = alloc_disk(1 << MMC_SHIFT);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx << MMC_SHIFT;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = &card->dev;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	sprintf(md->disk->disk_name, "mmcblk%d", devidx);

	blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512-byte
		 * sectors.
		 */
		set_capacity(md->disk, card->ext_csd.sectors);
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		set_capacity(md->disk,
			card->csd.capacity << (card->csd.read_blkbits - 9));
	}
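
	/*
	 * Either way, the capacity handed to the block layer is in
	 * 512-byte sectors; a 2 GiB card, for instance, ends up
	 * reporting 4194304 sectors here.
	 */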
	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	struct mmc_command cmd;
	int err;

	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
	if (mmc_card_blockaddr(card))
		return 0;

	mmc_claim_host(card->host);
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = 1 << md->block_bits;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	mmc_release_host(card->host);

	if (err) {
		printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
			md->disk->disk_name, cmd.arg, err);
		return -EINVAL;
	}

	return 0;
}

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int err;

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	printk(KERN_INFO "%s: %s %s %lluKiB %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		(unsigned long long)(get_capacity(md->disk) >> 1),
		md->read_only ? "(ro)" : "");

	mmc_set_drvdata(card, md);
	add_disk(md->disk);
	return 0;

 out:
	mmc_blk_put(md);

	return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		int devidx;

		/* Stop new requests from getting into the queue */
		del_gendisk(md->disk);

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);

		devidx = md->disk->first_minor >> MMC_SHIFT;
		__clear_bit(devidx, dev_use);

		mmc_blk_put(md);
	}
	mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/*
		 * The card may have been reinitialized over suspend,
		 * so restore the block length before restarting the
		 * queue.
		 */
		mmc_blk_set_blksize(md, card);
		mmc_queue_resume(&md->queue);
	}
	return 0;
}
#else
#define	mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
	int res;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	return mmc_register_driver(&mmc_driver);

 out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");