sd.c revision 4913efe456c987057e5d36a3f0a55422a9072cae
1/*
2 *      sd.c Copyright (C) 1992 Drew Eckhardt
3 *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 *
5 *      Linux scsi disk driver
6 *              Initial versions: Drew Eckhardt
7 *              Subsequent revisions: Eric Youngdale
8 *	Modification history:
9 *       - Drew Eckhardt <drew@colorado.edu> original
10 *       - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
11 *         outstanding requests, and other enhancements.
12 *         Support loadable low-level scsi drivers.
13 *       - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
14 *         eight major numbers.
15 *       - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
16 *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
17 *	   sd_init and cleanups.
18 *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
19 *	   not being read in sd_open. Fix problem where removable media
20 *	   could be ejected after sd_open.
21 *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
22 *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
23 *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
24 *	   Support 32k/1M disks.
25 *
26 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
27 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
28 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
29 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
30 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
31 *	Note: when the logging level is set by the user, it must be greater
32 *	than the level indicated above to trigger output.
33 */
34
35#include <linux/module.h>
36#include <linux/fs.h>
37#include <linux/kernel.h>
38#include <linux/mm.h>
39#include <linux/bio.h>
40#include <linux/genhd.h>
41#include <linux/hdreg.h>
42#include <linux/errno.h>
43#include <linux/idr.h>
44#include <linux/interrupt.h>
45#include <linux/init.h>
46#include <linux/blkdev.h>
47#include <linux/blkpg.h>
48#include <linux/delay.h>
49#include <linux/smp_lock.h>
50#include <linux/mutex.h>
51#include <linux/string_helpers.h>
52#include <linux/async.h>
53#include <linux/slab.h>
54#include <asm/uaccess.h>
55#include <asm/unaligned.h>
56
57#include <scsi/scsi.h>
58#include <scsi/scsi_cmnd.h>
59#include <scsi/scsi_dbg.h>
60#include <scsi/scsi_device.h>
61#include <scsi/scsi_driver.h>
62#include <scsi/scsi_eh.h>
63#include <scsi/scsi_host.h>
64#include <scsi/scsi_ioctl.h>
65#include <scsi/scsicam.h>
66
67#include "sd.h"
68#include "scsi_logging.h"
69
70MODULE_AUTHOR("Eric Youngdale");
71MODULE_DESCRIPTION("SCSI disk (sd) driver");
72MODULE_LICENSE("GPL");
73
74MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
75MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
76MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
77MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
78MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
79MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
80MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
81MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
82MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
83MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
84MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
85MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
86MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
87MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
88MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
89MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
90MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
91MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
92MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
93
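/*
 * With SD_MINORS == 16 each disk gets a whole-disk node plus 15
 * partition minors; CONFIG_DEBUG_BLOCK_EXT_DEVT sets it to 0 so that
 * partitions are forced into the extended dev_t range instead.
 */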
94#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
95#define SD_MINORS	16
96#else
97#define SD_MINORS	0
98#endif
99
100static int  sd_revalidate_disk(struct gendisk *);
101static void sd_unlock_native_capacity(struct gendisk *disk);
102static int  sd_probe(struct device *);
103static int  sd_remove(struct device *);
104static void sd_shutdown(struct device *);
105static int sd_suspend(struct device *, pm_message_t state);
106static int sd_resume(struct device *);
107static void sd_rescan(struct device *);
108static int sd_done(struct scsi_cmnd *);
109static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
110static void scsi_disk_release(struct device *cdev);
111static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
112static void sd_print_result(struct scsi_disk *, int);
113
114static DEFINE_SPINLOCK(sd_index_lock);
115static DEFINE_IDA(sd_index_ida);
116
117/* This mutex is used to mediate the 0->1 reference get in the
118 * face of object destruction (i.e. we can't allow a get on an
119 * object after last put) */
120static DEFINE_MUTEX(sd_ref_mutex);
121
122static struct kmem_cache *sd_cdb_cache;
123static mempool_t *sd_cdb_pool;
124
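/*
 * The index into sd_cache_types[] doubles as a bit field: bit 0 is
 * RCD (read cache disable) and bit 1 is WCE (write cache enable).
 */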
125static const char *sd_cache_types[] = {
126	"write through", "none", "write back",
127	"write back, no read (daft)"
128};
129
130static ssize_t
131sd_store_cache_type(struct device *dev, struct device_attribute *attr,
132		    const char *buf, size_t count)
133{
134	int i, ct = -1, rcd, wce, sp;
135	struct scsi_disk *sdkp = to_scsi_disk(dev);
136	struct scsi_device *sdp = sdkp->device;
137	char buffer[64];
138	char *buffer_data;
139	struct scsi_mode_data data;
140	struct scsi_sense_hdr sshdr;
141	int len;
142
143	if (sdp->type != TYPE_DISK)
144		/* no cache control on RBC devices; theoretically they
145		 * can do it, but there are probably so many exceptions
146		 * it's not worth the risk */
147		return -EINVAL;
148
149	for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
150		len = strlen(sd_cache_types[i]);
151		if (strncmp(sd_cache_types[i], buf, len) == 0 &&
152		    buf[len] == '\n') {
153			ct = i;
154			break;
155		}
156	}
157	if (ct < 0)
158		return -EINVAL;
159	rcd = ct & 0x01 ? 1 : 0;
160	wce = ct & 0x02 ? 1 : 0;
161	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
162			    SD_MAX_RETRIES, &data, NULL))
163		return -EINVAL;
164	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
165		  data.block_descriptor_length);
166	buffer_data = buffer + data.header_length +
167		data.block_descriptor_length;
168	buffer_data[2] &= ~0x05;	/* clear WCE (bit 2) and RCD (bit 0) */
169	buffer_data[2] |= wce << 2 | rcd;
170	sp = buffer_data[0] & 0x80 ? 1 : 0;	/* PS bit: page parameters are savable */
171
172	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
173			     SD_MAX_RETRIES, &data, &sshdr)) {
174		if (scsi_sense_valid(&sshdr))
175			sd_print_sense_hdr(sdkp, &sshdr);
176		return -EINVAL;
177	}
178	revalidate_disk(sdkp->disk);
179	return count;
180}
181
182static ssize_t
183sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr,
184			   const char *buf, size_t count)
185{
186	struct scsi_disk *sdkp = to_scsi_disk(dev);
187	struct scsi_device *sdp = sdkp->device;
188
189	if (!capable(CAP_SYS_ADMIN))
190		return -EACCES;
191
192	sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
193
194	return count;
195}
196
197static ssize_t
198sd_store_allow_restart(struct device *dev, struct device_attribute *attr,
199		       const char *buf, size_t count)
200{
201	struct scsi_disk *sdkp = to_scsi_disk(dev);
202	struct scsi_device *sdp = sdkp->device;
203
204	if (!capable(CAP_SYS_ADMIN))
205		return -EACCES;
206
207	if (sdp->type != TYPE_DISK)
208		return -EINVAL;
209
210	sdp->allow_restart = simple_strtoul(buf, NULL, 10);
211
212	return count;
213}
214
215static ssize_t
216sd_show_cache_type(struct device *dev, struct device_attribute *attr,
217		   char *buf)
218{
219	struct scsi_disk *sdkp = to_scsi_disk(dev);
220	int ct = sdkp->RCD + 2*sdkp->WCE;
221
222	return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
223}
224
225static ssize_t
226sd_show_fua(struct device *dev, struct device_attribute *attr, char *buf)
227{
228	struct scsi_disk *sdkp = to_scsi_disk(dev);
229
230	return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
231}
232
233static ssize_t
234sd_show_manage_start_stop(struct device *dev, struct device_attribute *attr,
235			  char *buf)
236{
237	struct scsi_disk *sdkp = to_scsi_disk(dev);
238	struct scsi_device *sdp = sdkp->device;
239
240	return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
241}
242
243static ssize_t
244sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
245		      char *buf)
246{
247	struct scsi_disk *sdkp = to_scsi_disk(dev);
248
249	return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
250}
251
252static ssize_t
253sd_show_protection_type(struct device *dev, struct device_attribute *attr,
254			char *buf)
255{
256	struct scsi_disk *sdkp = to_scsi_disk(dev);
257
258	return snprintf(buf, 20, "%u\n", sdkp->protection_type);
259}
260
261static ssize_t
262sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
263		    char *buf)
264{
265	struct scsi_disk *sdkp = to_scsi_disk(dev);
266
267	return snprintf(buf, 20, "%u\n", sdkp->ATO);
268}
269
270static ssize_t
271sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
272			  char *buf)
273{
274	struct scsi_disk *sdkp = to_scsi_disk(dev);
275
276	return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
277}
278
279static struct device_attribute sd_disk_attrs[] = {
280	__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
281	       sd_store_cache_type),
282	__ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
283	__ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
284	       sd_store_allow_restart),
285	__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
286	       sd_store_manage_start_stop),
287	__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
288	__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
289	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
290	__ATTR_NULL,
291};
292
293static struct class sd_disk_class = {
294	.name		= "scsi_disk",
295	.owner		= THIS_MODULE,
296	.dev_release	= scsi_disk_release,
297	.dev_attrs	= sd_disk_attrs,
298};
299
300static struct scsi_driver sd_template = {
301	.owner			= THIS_MODULE,
302	.gendrv = {
303		.name		= "sd",
304		.probe		= sd_probe,
305		.remove		= sd_remove,
306		.suspend	= sd_suspend,
307		.resume		= sd_resume,
308		.shutdown	= sd_shutdown,
309	},
310	.rescan			= sd_rescan,
311	.done			= sd_done,
312};
313
314/*
315 * Device no to disk mapping:
316 *
317 *       major         disc2     disc  p1
318 *   |............|.............|....|....| <- dev_t
319 *    31        20 19          8 7  4 3  0
320 *
321 * Inside a major, we have 16k disks, but they are mapped non-
322 * contiguously. The first 16 disks belong to major0, the next 16
323 * to major1, ... Disk 256 is for major0 again, disk 272
324 * for major1, ...
325 * As long as we stay compatible with our numbering scheme, we can
326 * reuse the well-known SCSI majors 8, 65--71, 136--143.
327 */
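/*
 * Example: with SD_MINORS == 16, major 8 (SCSI_DISK0_MAJOR) minor 0 is
 * the first whole disk, minors 1-15 are its partitions, minor 16 is
 * the second whole disk, so dev_t 8:17 is partition 1 of that disk.
 */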
328static int sd_major(int major_idx)
329{
330	switch (major_idx) {
331	case 0:
332		return SCSI_DISK0_MAJOR;
333	case 1 ... 7:
334		return SCSI_DISK1_MAJOR + major_idx - 1;
335	case 8 ... 15:
336		return SCSI_DISK8_MAJOR + major_idx - 8;
337	default:
338		BUG();
339		return 0;	/* shut up gcc */
340	}
341}
342
343static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
344{
345	struct scsi_disk *sdkp = NULL;
346
347	if (disk->private_data) {
348		sdkp = scsi_disk(disk);
349		if (scsi_device_get(sdkp->device) == 0)
350			get_device(&sdkp->dev);
351		else
352			sdkp = NULL;
353	}
354	return sdkp;
355}
356
357static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
358{
359	struct scsi_disk *sdkp;
360
361	mutex_lock(&sd_ref_mutex);
362	sdkp = __scsi_disk_get(disk);
363	mutex_unlock(&sd_ref_mutex);
364	return sdkp;
365}
366
367static struct scsi_disk *scsi_disk_get_from_dev(struct device *dev)
368{
369	struct scsi_disk *sdkp;
370
371	mutex_lock(&sd_ref_mutex);
372	sdkp = dev_get_drvdata(dev);
373	if (sdkp)
374		sdkp = __scsi_disk_get(sdkp->disk);
375	mutex_unlock(&sd_ref_mutex);
376	return sdkp;
377}
378
379static void scsi_disk_put(struct scsi_disk *sdkp)
380{
381	struct scsi_device *sdev = sdkp->device;
382
383	mutex_lock(&sd_ref_mutex);
384	put_device(&sdkp->dev);
385	scsi_device_put(sdev);
386	mutex_unlock(&sd_ref_mutex);
387}
388
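/*
 * dif != 0 means the target is formatted with protection information;
 * dix != 0 means the block layer attached integrity metadata to the
 * request.  The combination selects whether the HBA should pass,
 * strip or insert protection data for this command.
 */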
389static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
390{
391	unsigned int prot_op = SCSI_PROT_NORMAL;
392	unsigned int dix = scsi_prot_sg_count(scmd);
393
394	if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
395		if (dif && dix)
396			prot_op = SCSI_PROT_READ_PASS;
397		else if (dif && !dix)
398			prot_op = SCSI_PROT_READ_STRIP;
399		else if (!dif && dix)
400			prot_op = SCSI_PROT_READ_INSERT;
401	} else {
402		if (dif && dix)
403			prot_op = SCSI_PROT_WRITE_PASS;
404		else if (dif && !dix)
405			prot_op = SCSI_PROT_WRITE_INSERT;
406		else if (!dif && dix)
407			prot_op = SCSI_PROT_WRITE_STRIP;
408	}
409
410	scsi_set_prot_op(scmd, prot_op);
411	scsi_set_prot_type(scmd, dif);
412}
413
414/**
415 * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
416 * @sdp: scsi device to operate on
417 * @rq: Request to prepare
418 *
419 * Will issue either UNMAP or WRITE SAME(16) depending on the preference
420 * indicated by the target device.
421 **/
422static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
423{
424	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
425	struct bio *bio = rq->bio;
426	sector_t sector = bio->bi_sector;
427	unsigned int nr_sectors = bio_sectors(bio);
428	unsigned int len;
429	int ret;
430	struct page *page;
431
432	if (sdkp->device->sector_size == 4096) {
433		sector >>= 3;
434		nr_sectors >>= 3;
435	}
436
437	rq->timeout = SD_TIMEOUT;
438
439	memset(rq->cmd, 0, rq->cmd_len);
440
441	page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
442	if (!page)
443		return BLKPREP_DEFER;
444
445	if (sdkp->unmap) {
446		char *buf = page_address(page);
447
448		rq->cmd_len = 10;
449		rq->cmd[0] = UNMAP;
450		rq->cmd[8] = 24;
451
452		put_unaligned_be16(6 + 16, &buf[0]);	/* UNMAP data length */
453		put_unaligned_be16(16, &buf[2]);	/* block descriptor data length */
454		put_unaligned_be64(sector, &buf[8]);	/* descriptor: starting LBA */
455		put_unaligned_be32(nr_sectors, &buf[16]); /* descriptor: block count */
456
457		len = 24;
458	} else {
459		rq->cmd_len = 16;
460		rq->cmd[0] = WRITE_SAME_16;
461		rq->cmd[1] = 0x8; /* UNMAP */
462		put_unaligned_be64(sector, &rq->cmd[2]);
463		put_unaligned_be32(nr_sectors, &rq->cmd[10]);
464
465		len = sdkp->device->sector_size;
466	}
467
468	blk_add_request_payload(rq, page, len);
469	ret = scsi_setup_blk_pc_cmnd(sdp, rq);
470	rq->buffer = page_address(page);
471	if (ret != BLKPREP_OK) {
472		__free_page(page);
473		rq->buffer = NULL;
474	}
475	return ret;
476}
477
478static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
479{
480	rq->timeout = SD_TIMEOUT;
481	rq->retries = SD_MAX_RETRIES;
482	rq->cmd[0] = SYNCHRONIZE_CACHE;
483	rq->cmd_len = 10;
484
485	return scsi_setup_blk_pc_cmnd(sdp, rq);
486}
487
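/*
 * Release the payload page that scsi_setup_discard_cmnd() stashed in
 * rq->buffer once the discard request has been unprepped.
 */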
488static void sd_unprep_fn(struct request_queue *q, struct request *rq)
489{
490	if (rq->cmd_flags & REQ_DISCARD) {
491		free_page((unsigned long)rq->buffer);
492		rq->buffer = NULL;
493	}
494}
495
496/**
497 *	sd_prep_fn - build a scsi (read or write) command from
498 *	information in the request structure.
499 *	@q: request queue the request has been queued on
500 *	@rq: request to be prepared; the scsi command is built into it
501 *
502 *	Returns BLKPREP_OK, BLKPREP_DEFER or BLKPREP_KILL.
503 **/
504static int sd_prep_fn(struct request_queue *q, struct request *rq)
505{
506	struct scsi_cmnd *SCpnt;
507	struct scsi_device *sdp = q->queuedata;
508	struct gendisk *disk = rq->rq_disk;
509	struct scsi_disk *sdkp;
510	sector_t block = blk_rq_pos(rq);
511	sector_t threshold;
512	unsigned int this_count = blk_rq_sectors(rq);
513	int ret, host_dif;
514	unsigned char protect;
515
516	/*
517	 * Discard requests come in as REQ_TYPE_FS but we turn them into
518	 * block PC requests to make life easier.
519	 */
520	if (rq->cmd_flags & REQ_DISCARD) {
521		ret = scsi_setup_discard_cmnd(sdp, rq);
522		goto out;
523	} else if (rq->cmd_flags & REQ_FLUSH) {
524		ret = scsi_setup_flush_cmnd(sdp, rq);
525		goto out;
526	} else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
527		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
528		goto out;
529	} else if (rq->cmd_type != REQ_TYPE_FS) {
530		ret = BLKPREP_KILL;
531		goto out;
532	}
533	ret = scsi_setup_fs_cmnd(sdp, rq);
534	if (ret != BLKPREP_OK)
535		goto out;
536	SCpnt = rq->special;
537	sdkp = scsi_disk(disk);
538
539	/* from here on until we're complete, any goto out
540	 * is used for a killable error condition */
541	ret = BLKPREP_KILL;
542
543	SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
544					"sd_init_command: block=%llu, "
545					"count=%d\n",
546					(unsigned long long)block,
547					this_count));
548
549	if (!sdp || !scsi_device_online(sdp) ||
550	    block + blk_rq_sectors(rq) > get_capacity(disk)) {
551		SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
552						"Finishing %u sectors\n",
553						blk_rq_sectors(rq)));
554		SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
555						"Retry with 0x%p\n", SCpnt));
556		goto out;
557	}
558
559	if (sdp->changed) {
560		/*
561		 * quietly refuse to do anything to a changed disc until
562		 * the changed bit has been reset
563		 */
564		/* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
565		goto out;
566	}
567
568	/*
569	 * Some SD card readers can't handle multi-sector accesses which touch
570	 * the last one or two hardware sectors.  Split accesses as needed.
571	 */
572	threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
573		(sdp->sector_size / 512);
574
575	if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
576		if (block < threshold) {
577			/* Access up to the threshold but not beyond */
578			this_count = threshold - block;
579		} else {
580			/* Access only a single hardware sector */
581			this_count = sdp->sector_size / 512;
582		}
583	}
584
585	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
586					(unsigned long long)block));
587
588	/*
589	 * If we have a 1K hardware sectorsize, prevent access to single
590	 * 512 byte sectors.  In theory we could handle this - in fact
591	 * the scsi cdrom driver must be able to handle this because
592	 * we typically use 1K blocksizes, and cdroms typically have
593	 * 2K hardware sectorsizes.  Of course, things are simpler
594	 * with the cdrom, since it is read-only.  For performance
595	 * reasons, the filesystems should be able to handle this
596	 * and not force the scsi disk driver to use bounce buffers
597	 * for this.
598	 */
599	if (sdp->sector_size == 1024) {
600		if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
601			scmd_printk(KERN_ERR, SCpnt,
602				    "Bad block number requested\n");
603			goto out;
604		} else {
605			block = block >> 1;
606			this_count = this_count >> 1;
607		}
608	}
609	if (sdp->sector_size == 2048) {
610		if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
611			scmd_printk(KERN_ERR, SCpnt,
612				    "Bad block number requested\n");
613			goto out;
614		} else {
615			block = block >> 2;
616			this_count = this_count >> 2;
617		}
618	}
619	if (sdp->sector_size == 4096) {
620		if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
621			scmd_printk(KERN_ERR, SCpnt,
622				    "Bad block number requested\n");
623			goto out;
624		} else {
625			block = block >> 3;
626			this_count = this_count >> 3;
627		}
628	}
629	if (rq_data_dir(rq) == WRITE) {
630		if (!sdp->writeable) {
631			goto out;
632		}
633		SCpnt->cmnd[0] = WRITE_6;
634		SCpnt->sc_data_direction = DMA_TO_DEVICE;
635
636		if (blk_integrity_rq(rq) &&
637		    sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
638			goto out;
639
640	} else if (rq_data_dir(rq) == READ) {
641		SCpnt->cmnd[0] = READ_6;
642		SCpnt->sc_data_direction = DMA_FROM_DEVICE;
643	} else {
644		scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
645		goto out;
646	}
647
648	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
649					"%s %d/%u 512 byte blocks.\n",
650					(rq_data_dir(rq) == WRITE) ?
651					"writing" : "reading", this_count,
652					blk_rq_sectors(rq)));
653
654	/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
655	host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
656	if (host_dif)
657		protect = 1 << 5;
658	else
659		protect = 0;
660
661	if (host_dif == SD_DIF_TYPE2_PROTECTION) {
662		SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
663
664		if (unlikely(SCpnt->cmnd == NULL)) {
665			ret = BLKPREP_DEFER;
666			goto out;
667		}
668
669		SCpnt->cmd_len = SD_EXT_CDB_SIZE;
670		memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
671		SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
672		SCpnt->cmnd[7] = 0x18;
673		SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
674		SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
675
676		/* LBA */
677		SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
678		SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
679		SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
680		SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
681		SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff;
682		SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff;
683		SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff;
684		SCpnt->cmnd[19] = (unsigned char) block & 0xff;
685
686		/* Expected Indirect LBA */
687		SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff;
688		SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff;
689		SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff;
690		SCpnt->cmnd[23] = (unsigned char) block & 0xff;
691
692		/* Transfer length */
693		SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff;
694		SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
695		SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
696		SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
697	} else if (block > 0xffffffff) {
698		SCpnt->cmnd[0] += READ_16 - READ_6;
699		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
700		SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
701		SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
702		SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
703		SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
704		SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff;
705		SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff;
706		SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff;
707		SCpnt->cmnd[9] = (unsigned char) block & 0xff;
708		SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff;
709		SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff;
710		SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff;
711		SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
712		SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
713	} else if ((this_count > 0xff) || (block > 0x1fffff) ||
714		   scsi_device_protection(SCpnt->device) ||
715		   SCpnt->device->use_10_for_rw) {
716		if (this_count > 0xffff)
717			this_count = 0xffff;
718
719		SCpnt->cmnd[0] += READ_10 - READ_6;
720		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
721		SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
722		SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
723		SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
724		SCpnt->cmnd[5] = (unsigned char) block & 0xff;
725		SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
726		SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
727		SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
728	} else {
729		if (unlikely(rq->cmd_flags & REQ_FUA)) {
730			/*
731			 * This happens only if this drive failed
732			 * 10byte rw command with ILLEGAL_REQUEST
733			 * during operation and thus turned off
734			 * use_10_for_rw.
735			 */
736			scmd_printk(KERN_ERR, SCpnt,
737				    "FUA write on READ/WRITE(6) drive\n");
738			goto out;
739		}
740
741		SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
742		SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
743		SCpnt->cmnd[3] = (unsigned char) block & 0xff;
744		SCpnt->cmnd[4] = (unsigned char) this_count;
745		SCpnt->cmnd[5] = 0;
746	}
747	SCpnt->sdb.length = this_count * sdp->sector_size;
748
749	/* If DIF or DIX is enabled, tell HBA how to handle request */
750	if (host_dif || scsi_prot_sg_count(SCpnt))
751		sd_prot_op(SCpnt, host_dif);
752
753	/*
754	 * We shouldn't disconnect in the middle of a sector, so with a dumb
755	 * host adapter, it's safe to assume that we can at least transfer
756	 * this many bytes between each connect / disconnect.
757	 */
758	SCpnt->transfersize = sdp->sector_size;
759	SCpnt->underflow = this_count << 9;
760	SCpnt->allowed = SD_MAX_RETRIES;
761
762	/*
763	 * This indicates that the command is ready from our end to be
764	 * queued.
765	 */
766	ret = BLKPREP_OK;
767 out:
768	return scsi_prep_return(q, rq, ret);
769}
770
771/**
772 *	sd_open - open a scsi disk device
773 *	@inode: only i_rdev member may be used
774 *	@filp: only f_mode and f_flags may be used
775 *
776 *	Returns 0 if successful. Returns a negated errno value in case
777 *	of error.
778 *
779 *	Note: This can be called from a user context (e.g. fsck(1) )
780 *	or from within the kernel (e.g. as a result of a mount(1) ).
781 *	In the latter case @inode and @filp carry an abridged amount
782 *	of information as noted above.
783 *
784 *	Locking: called with bdev->bd_mutex held.
785 **/
786static int sd_open(struct block_device *bdev, fmode_t mode)
787{
788	struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
789	struct scsi_device *sdev;
790	int retval;
791
792	if (!sdkp)
793		return -ENXIO;
794
795	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
796
797	sdev = sdkp->device;
798
799	retval = scsi_autopm_get_device(sdev);
800	if (retval)
801		goto error_autopm;
802
803	/*
804	 * If the device is in error recovery, wait until it is done.
805	 * If the device is offline, then disallow any access to it.
806	 */
807	retval = -ENXIO;
808	if (!scsi_block_when_processing_errors(sdev))
809		goto error_out;
810
811	if (sdev->removable || sdkp->write_prot)
812		check_disk_change(bdev);
813
814	/*
815	 * If the drive is empty, just let the open fail.
816	 */
817	retval = -ENOMEDIUM;
818	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
819		goto error_out;
820
821	/*
822	 * If the device has the write protect tab set, have the open fail
823	 * if the user expects to be able to write to the thing.
824	 */
825	retval = -EROFS;
826	if (sdkp->write_prot && (mode & FMODE_WRITE))
827		goto error_out;
828
829	/*
830	 * It is possible that the disk changing stuff resulted in
831	 * the device being taken offline.  If this is the case,
832	 * report this to the user, and don't pretend that the
833	 * open actually succeeded.
834	 */
835	retval = -ENXIO;
836	if (!scsi_device_online(sdev))
837		goto error_out;
838
839	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
840		if (scsi_block_when_processing_errors(sdev))
841			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
842	}
843
844	return 0;
845
846error_out:
847	scsi_autopm_put_device(sdev);
848error_autopm:
849	scsi_disk_put(sdkp);
850	return retval;
851}
852
853/**
854 *	sd_release - invoked when the (last) close(2) is called on this
855 *	scsi disk.
856 *	@inode: only i_rdev member may be used
857 *	@filp: only f_mode and f_flags may be used
858 *
859 *	Returns 0.
860 *
861 *	Note: may block (uninterruptible) if error recovery is underway
862 *	on this disk.
863 *
864 *	Locking: called with bdev->bd_mutex held.
865 **/
866static int sd_release(struct gendisk *disk, fmode_t mode)
867{
868	struct scsi_disk *sdkp = scsi_disk(disk);
869	struct scsi_device *sdev = sdkp->device;
870
871	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
872
873	if (atomic_dec_return(&sdkp->openers) && sdev->removable) {
874		if (scsi_block_when_processing_errors(sdev))
875			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
876	}
877
878	/*
879	 * XXX and what if there are packets in flight and this close()
880	 * XXX is followed by a "rmmod sd_mod"?
881	 */
882
883	scsi_autopm_put_device(sdev);
884	scsi_disk_put(sdkp);
885	return 0;
886}
887
888static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
889{
890	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
891	struct scsi_device *sdp = sdkp->device;
892	struct Scsi_Host *host = sdp->host;
893	int diskinfo[4];
894
895	/* default to most commonly used values */
896	diskinfo[0] = 0x40;	/* 1 << 6 */
897	diskinfo[1] = 0x20;	/* 1 << 5 */
898	diskinfo[2] = sdkp->capacity >> 11;
899
900	/* override with calculated, extended default, or driver values */
901	if (host->hostt->bios_param)
902		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
903	else
904		scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
905
906	geo->heads = diskinfo[0];
907	geo->sectors = diskinfo[1];
908	geo->cylinders = diskinfo[2];
909	return 0;
910}
911
912/**
913 *	sd_ioctl - process an ioctl
914 *	@inode: only i_rdev/i_bdev members may be used
915 *	@filp: only f_mode and f_flags may be used
916 *	@cmd: ioctl command number
917 *	@arg: this is third argument given to ioctl(2) system call.
918 *	Often contains a pointer.
919 *
920 *	Returns 0 if successful (some ioctls return positive numbers on
921 *	success as well). Returns a negated errno value in case of error.
922 *
923 *	Note: most ioctls are forwarded onto the block subsystem or further
924 *	down in the scsi subsystem.
925 **/
926static int sd_ioctl(struct block_device *bdev, fmode_t mode,
927		    unsigned int cmd, unsigned long arg)
928{
929	struct gendisk *disk = bdev->bd_disk;
930	struct scsi_device *sdp = scsi_disk(disk)->device;
931	void __user *p = (void __user *)arg;
932	int error;
933
934	SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
935						disk->disk_name, cmd));
936
937	/*
938	 * If we are in the middle of error recovery, don't let anyone
939	 * else try and use this device.  Also, if error recovery fails, it
940	 * may try and take the device offline, in which case all further
941	 * access to the device is prohibited.
942	 */
943	error = scsi_nonblockable_ioctl(sdp, cmd, p,
944					(mode & FMODE_NDELAY) != 0);
945	if (!scsi_block_when_processing_errors(sdp) || !error)
946		goto out;
947
948	/*
949	 * Send SCSI addressing ioctls directly to mid level, send other
950	 * ioctls to block level and then onto mid level if they can't be
951	 * resolved.
952	 */
953	switch (cmd) {
954		case SCSI_IOCTL_GET_IDLUN:
955		case SCSI_IOCTL_GET_BUS_NUMBER:
956			error = scsi_ioctl(sdp, cmd, p);
957			break;
958		default:
959			error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
960			if (error != -ENOTTY)
961				break;
962			error = scsi_ioctl(sdp, cmd, p);
963			break;
964	}
965out:
966	return error;
967}
968
969static void set_media_not_present(struct scsi_disk *sdkp)
970{
971	sdkp->media_present = 0;
972	sdkp->capacity = 0;
973	sdkp->device->changed = 1;
974}
975
976/**
977 *	sd_media_changed - check if our medium changed
978 *	@disk: kernel device descriptor
979 *
980 *	Returns 0 if not applicable or no change; 1 if change
981 *
982 *	Note: this function is invoked from the block subsystem.
983 **/
984static int sd_media_changed(struct gendisk *disk)
985{
986	struct scsi_disk *sdkp = scsi_disk(disk);
987	struct scsi_device *sdp = sdkp->device;
988	struct scsi_sense_hdr *sshdr = NULL;
989	int retval;
990
991	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
992
993	if (!sdp->removable)
994		return 0;
995
996	/*
997	 * If the device is offline, don't send any commands - just pretend as
998	 * if the command failed.  If the device ever comes back online, we
999	 * can deal with it then.  It is only because of unrecoverable errors
1000	 * that we would ever take a device offline in the first place.
1001	 */
1002	if (!scsi_device_online(sdp)) {
1003		set_media_not_present(sdkp);
1004		retval = 1;
1005		goto out;
1006	}
1007
1008	/*
1009	 * Using TEST_UNIT_READY enables differentiation between a drive with
1010	 * no cartridge loaded (NOT READY), a drive with a changed cartridge
1011	 * (UNIT ATTENTION), or one with the same cartridge (GOOD STATUS).
1012	 *
1013	 * Drives that auto spin down, e.g. the iomega jaz 1G, will be started
1014	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens
1015	 * whenever the disk is revalidated.
1016	 */
1017	retval = -ENODEV;
1018
1019	if (scsi_block_when_processing_errors(sdp)) {
1020		sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1021		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
1022					      sshdr);
1023	}
1024
1025	/*
1026	 * Unable to test, unit probably not ready.   This usually
1027	 * means there is no disc in the drive.  Mark as changed,
1028	 * and we will figure it out later once the drive is
1029	 * available again.
1030	 */
1031	if (retval || (scsi_sense_valid(sshdr) &&
1032		       /* 0x3a is medium not present */
1033		       sshdr->asc == 0x3a)) {
1034		set_media_not_present(sdkp);
1035		retval = 1;
1036		goto out;
1037	}
1038
1039	/*
1040	 * For removable scsi disks we have to recognise the presence
1041	 * of a disk in the drive. This is kept in the scsi_disk struct
1042	 * and tested at open!  Daniel Roche (dan@lectra.fr)
1043	 */
1044	sdkp->media_present = 1;
1045
1046	retval = sdp->changed;
1047	sdp->changed = 0;
1048out:
1049	if (retval != sdkp->previous_state)
1050		sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
1051	sdkp->previous_state = retval;
1052	kfree(sshdr);
1053	return retval;
1054}
1055
1056static int sd_sync_cache(struct scsi_disk *sdkp)
1057{
1058	int retries, res;
1059	struct scsi_device *sdp = sdkp->device;
1060	struct scsi_sense_hdr sshdr;
1061
1062	if (!scsi_device_online(sdp))
1063		return -ENODEV;
1064
1065
1066	for (retries = 3; retries > 0; --retries) {
1067		unsigned char cmd[10] = { 0 };
1068
1069		cmd[0] = SYNCHRONIZE_CACHE;
1070		/*
1071		 * Leave the rest of the command zero to indicate
1072		 * flush everything.
1073		 */
1074		res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
1075				       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1076		if (res == 0)
1077			break;
1078	}
1079
1080	if (res) {
1081		sd_print_result(sdkp, res);
1082		if (driver_byte(res) & DRIVER_SENSE)
1083			sd_print_sense_hdr(sdkp, &sshdr);
1084	}
1085
1086	if (res)
1087		return -EIO;
1088	return 0;
1089}
1090
1091static void sd_rescan(struct device *dev)
1092{
1093	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
1094
1095	if (sdkp) {
1096		revalidate_disk(sdkp->disk);
1097		scsi_disk_put(sdkp);
1098	}
1099}
1100
1101
1102#ifdef CONFIG_COMPAT
1103/*
1104 * This gets directly called from VFS. When the ioctl
1105 * is not recognized we go back to the other translation paths.
1106 */
1107static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
1108			   unsigned int cmd, unsigned long arg)
1109{
1110	struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
1111
1112	/*
1113	 * If we are in the middle of error recovery, don't let anyone
1114	 * else try and use this device.  Also, if error recovery fails, it
1115	 * may try and take the device offline, in which case all further
1116	 * access to the device is prohibited.
1117	 */
1118	if (!scsi_block_when_processing_errors(sdev))
1119		return -ENODEV;
1120
1121	if (sdev->host->hostt->compat_ioctl) {
1122		int ret;
1123
1124		ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
1125
1126		return ret;
1127	}
1128
1129	/*
1130	 * Let the static ioctl translation table take care of it.
1131	 */
1132	return -ENOIOCTLCMD;
1133}
1134#endif
1135
1136static const struct block_device_operations sd_fops = {
1137	.owner			= THIS_MODULE,
1138	.open			= sd_open,
1139	.release		= sd_release,
1140	.ioctl			= sd_ioctl,
1141	.getgeo			= sd_getgeo,
1142#ifdef CONFIG_COMPAT
1143	.compat_ioctl		= sd_compat_ioctl,
1144#endif
1145	.media_changed		= sd_media_changed,
1146	.revalidate_disk	= sd_revalidate_disk,
1147	.unlock_native_capacity	= sd_unlock_native_capacity,
1148};
1149
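/*
 * Work out how many bytes were transferred successfully before the
 * failing LBA reported in the sense data, in multiples of the device's
 * logical block size.
 */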
1150static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1151{
1152	u64 start_lba = blk_rq_pos(scmd->request);
1153	u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
1154	u64 bad_lba;
1155	int info_valid;
1156
1157	if (scmd->request->cmd_type != REQ_TYPE_FS)
1158		return 0;
1159
1160	info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
1161					     SCSI_SENSE_BUFFERSIZE,
1162					     &bad_lba);
1163	if (!info_valid)
1164		return 0;
1165
1166	if (scsi_bufflen(scmd) <= scmd->device->sector_size)
1167		return 0;
1168
1169	if (scmd->device->sector_size < 512) {
1170		/* only legitimate sector_size here is 256 */
1171		start_lba <<= 1;
1172		end_lba <<= 1;
1173	} else {
1174		/* be careful ... don't want any overflows */
1175		u64 factor = scmd->device->sector_size / 512;
1176		do_div(start_lba, factor);
1177		do_div(end_lba, factor);
1178	}
1179
1180	/* The bad lba was reported incorrectly, so we have no idea where
1181	 * the error is.
1182	 */
1183	if (bad_lba < start_lba  || bad_lba >= end_lba)
1184		return 0;
1185
1186	/* This computation should always be done in terms of
1187	 * the resolution of the device's medium.
1188	 */
1189	return (bad_lba - start_lba) * scmd->device->sector_size;
1190}
1191
1192/**
1193 *	sd_done - bottom half handler: called when the lower level
1194 *	driver has completed (successfully or otherwise) a scsi command.
1195 *	@SCpnt: mid-level's per command structure.
1196 *
1197 *	Note: potentially run from within an ISR. Must not block.
1198 **/
1199static int sd_done(struct scsi_cmnd *SCpnt)
1200{
1201	int result = SCpnt->result;
1202	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
1203	struct scsi_sense_hdr sshdr;
1204	struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
1205	int sense_valid = 0;
1206	int sense_deferred = 0;
1207
1208	if (SCpnt->request->cmd_flags & REQ_DISCARD) {
1209		if (!result)
1210			scsi_set_resid(SCpnt, 0);
1211		return good_bytes;
1212	}
1213
1214	if (result) {
1215		sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
1216		if (sense_valid)
1217			sense_deferred = scsi_sense_is_deferred(&sshdr);
1218	}
1219#ifdef CONFIG_SCSI_LOGGING
1220	SCSI_LOG_HLCOMPLETE(1, scsi_print_result(SCpnt));
1221	if (sense_valid) {
1222		SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
1223						   "sd_done: sb[respc,sk,asc,"
1224						   "ascq]=%x,%x,%x,%x\n",
1225						   sshdr.response_code,
1226						   sshdr.sense_key, sshdr.asc,
1227						   sshdr.ascq));
1228	}
1229#endif
1230	if (driver_byte(result) != DRIVER_SENSE &&
1231	    (!sense_valid || sense_deferred))
1232		goto out;
1233
1234	switch (sshdr.sense_key) {
1235	case HARDWARE_ERROR:
1236	case MEDIUM_ERROR:
1237		good_bytes = sd_completed_bytes(SCpnt);
1238		break;
1239	case RECOVERED_ERROR:
1240		good_bytes = scsi_bufflen(SCpnt);
1241		break;
1242	case NO_SENSE:
1243		/* This indicates a false check condition, so ignore it.  An
1244		 * unknown amount of data was transferred so treat it as an
1245		 * error.
1246		 */
1247		scsi_print_sense("sd", SCpnt);
1248		SCpnt->result = 0;
1249		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1250		break;
1251	case ABORTED_COMMAND: /* DIF: Target detected corruption */
1252	case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
1253		if (sshdr.asc == 0x10)
1254			good_bytes = sd_completed_bytes(SCpnt);
1255		break;
1256	default:
1257		break;
1258	}
1259 out:
1260	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1261		sd_dif_complete(SCpnt, good_bytes);
1262
1263	if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
1264	    == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
1265
1266		/* We have to print a failed command here as the
1267		 * extended CDB gets freed before scsi_io_completion()
1268		 * is called.
1269		 */
1270		if (result)
1271			scsi_print_command(SCpnt);
1272
1273		mempool_free(SCpnt->cmnd, sd_cdb_pool);
1274		SCpnt->cmnd = NULL;
1275		SCpnt->cmd_len = 0;
1276	}
1277
1278	return good_bytes;
1279}
1280
1281static int media_not_present(struct scsi_disk *sdkp,
1282			     struct scsi_sense_hdr *sshdr)
1283{
1284
1285	if (!scsi_sense_valid(sshdr))
1286		return 0;
1287	/* not invoked for commands that could return deferred errors */
1288	if (sshdr->sense_key != NOT_READY &&
1289	    sshdr->sense_key != UNIT_ATTENTION)
1290		return 0;
1291	if (sshdr->asc != 0x3A) /* medium not present */
1292		return 0;
1293
1294	set_media_not_present(sdkp);
1295	return 1;
1296}
1297
1298/*
1299 * spinup disk - called only in sd_revalidate_disk()
1300 */
1301static void
1302sd_spinup_disk(struct scsi_disk *sdkp)
1303{
1304	unsigned char cmd[10];
1305	unsigned long spintime_expire = 0;
1306	int retries, spintime;
1307	unsigned int the_result;
1308	struct scsi_sense_hdr sshdr;
1309	int sense_valid = 0;
1310
1311	spintime = 0;
1312
1313	/* Spin up drives, as required.  This needs to be done both at
1314	 * boot time and on module load. */
1315	do {
1316		retries = 0;
1317
1318		do {
1319			cmd[0] = TEST_UNIT_READY;
1320			memset((void *) &cmd[1], 0, 9);
1321
1322			the_result = scsi_execute_req(sdkp->device, cmd,
1323						      DMA_NONE, NULL, 0,
1324						      &sshdr, SD_TIMEOUT,
1325						      SD_MAX_RETRIES, NULL);
1326
1327			/*
1328			 * If the drive has indicated to us that it
1329			 * doesn't have any media in it, don't bother
1330			 * with any more polling.
1331			 */
1332			if (media_not_present(sdkp, &sshdr))
1333				return;
1334
1335			if (the_result)
1336				sense_valid = scsi_sense_valid(&sshdr);
1337			retries++;
1338		} while (retries < 3 &&
1339			 (!scsi_status_is_good(the_result) ||
1340			  ((driver_byte(the_result) & DRIVER_SENSE) &&
1341			  sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
1342
1343		if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
1344			/* no sense, TUR either succeeded or failed
1345			 * with a status error */
1346			if(!spintime && !scsi_status_is_good(the_result)) {
1347				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1348				sd_print_result(sdkp, the_result);
1349			}
1350			break;
1351		}
1352
1353		/*
1354		 * The device does not want the automatic start to be issued.
1355		 */
1356		if (sdkp->device->no_start_on_add)
1357			break;
1358
1359		if (sense_valid && sshdr.sense_key == NOT_READY) {
1360			if (sshdr.asc == 4 && sshdr.ascq == 3)
1361				break;	/* manual intervention required */
1362			if (sshdr.asc == 4 && sshdr.ascq == 0xb)
1363				break;	/* standby */
1364			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
1365				break;	/* unavailable */
1366			/*
1367			 * Issue command to spin up drive when not ready
1368			 */
1369			if (!spintime) {
1370				sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
1371				cmd[0] = START_STOP;
1372				cmd[1] = 1;	/* Return immediately */
1373				memset((void *) &cmd[2], 0, 8);
1374				cmd[4] = 1;	/* Start spin cycle */
1375				if (sdkp->device->start_stop_pwr_cond)
1376					cmd[4] |= 1 << 4;
1377				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
1378						 NULL, 0, &sshdr,
1379						 SD_TIMEOUT, SD_MAX_RETRIES,
1380						 NULL);
1381				spintime_expire = jiffies + 100 * HZ;
1382				spintime = 1;
1383			}
1384			/* Wait 1 second for next try */
1385			msleep(1000);
1386			printk(".");
1387
1388		/*
1389		 * Wait for USB flash devices with slow firmware.
1390		 * Yes, this sense key/ASC combination shouldn't
1391		 * occur here.  It's characteristic of these devices.
1392		 */
1393		} else if (sense_valid &&
1394				sshdr.sense_key == UNIT_ATTENTION &&
1395				sshdr.asc == 0x28) {
1396			if (!spintime) {
1397				spintime_expire = jiffies + 5 * HZ;
1398				spintime = 1;
1399			}
1400			/* Wait 1 second for next try */
1401			msleep(1000);
1402		} else {
1403			/* we don't understand the sense code, so it's
1404			 * probably pointless to loop */
1405			if(!spintime) {
1406				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1407				sd_print_sense_hdr(sdkp, &sshdr);
1408			}
1409			break;
1410		}
1411
1412	} while (spintime && time_before_eq(jiffies, spintime_expire));
1413
1414	if (spintime) {
1415		if (scsi_status_is_good(the_result))
1416			printk("ready\n");
1417		else
1418			printk("not responding...\n");
1419	}
1420}
1421
1422
1423/*
1424 * Determine whether disk supports Data Integrity Field.
1425 */
1426static void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
1427{
1428	struct scsi_device *sdp = sdkp->device;
1429	u8 type;
1430
1431	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
1432		return;
1433
1434	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1435
1436	if (type == sdkp->protection_type || !sdkp->first_scan)
1437		return;
1438
1439	sdkp->protection_type = type;
1440
1441	if (type > SD_DIF_TYPE3_PROTECTION) {
1442		sd_printk(KERN_ERR, sdkp, "formatted with unsupported "	\
1443			  "protection type %u. Disabling disk!\n", type);
1444		sdkp->capacity = 0;
1445		return;
1446	}
1447
1448	if (scsi_host_dif_capable(sdp->host, type))
1449		sd_printk(KERN_NOTICE, sdkp,
1450			  "Enabling DIF Type %u protection\n", type);
1451	else
1452		sd_printk(KERN_NOTICE, sdkp,
1453			  "Disabling DIF Type %u protection\n", type);
1454}
1455
1456static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
1457			struct scsi_sense_hdr *sshdr, int sense_valid,
1458			int the_result)
1459{
1460	sd_print_result(sdkp, the_result);
1461	if (driver_byte(the_result) & DRIVER_SENSE)
1462		sd_print_sense_hdr(sdkp, sshdr);
1463	else
1464		sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
1465
1466	/*
1467	 * Set dirty bit for removable devices if not ready -
1468	 * sometimes drives will not report this properly.
1469	 */
1470	if (sdp->removable &&
1471	    sense_valid && sshdr->sense_key == NOT_READY)
1472		sdp->changed = 1;
1473
1474	/*
1475	 * We used to set media_present to 0 here to indicate no media
1476	 * in the drive, but some drives fail read capacity even with
1477	 * media present, so we can't do that.
1478	 */
1479	sdkp->capacity = 0; /* unknown mapped to zero - as usual */
1480}
1481
1482#define RC16_LEN 32
1483#if RC16_LEN > SD_BUF_SIZE
1484#error RC16_LEN must not be more than SD_BUF_SIZE
1485#endif
1486
1487#define READ_CAPACITY_RETRIES_ON_RESET	10
1488
1489static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1490						unsigned char *buffer)
1491{
1492	unsigned char cmd[16];
1493	struct scsi_sense_hdr sshdr;
1494	int sense_valid = 0;
1495	int the_result;
1496	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
1497	unsigned int alignment;
1498	unsigned long long lba;
1499	unsigned sector_size;
1500
1501	do {
1502		memset(cmd, 0, 16);
1503		cmd[0] = SERVICE_ACTION_IN;
1504		cmd[1] = SAI_READ_CAPACITY_16;
1505		cmd[13] = RC16_LEN;
1506		memset(buffer, 0, RC16_LEN);
1507
1508		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1509					buffer, RC16_LEN, &sshdr,
1510					SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1511
1512		if (media_not_present(sdkp, &sshdr))
1513			return -ENODEV;
1514
1515		if (the_result) {
1516			sense_valid = scsi_sense_valid(&sshdr);
1517			if (sense_valid &&
1518			    sshdr.sense_key == ILLEGAL_REQUEST &&
1519			    (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
1520			    sshdr.ascq == 0x00)
1521				/* Invalid Command Operation Code or
1522				 * Invalid Field in CDB, just retry
1523				 * silently with RC10 */
1524				return -EINVAL;
1525			if (sense_valid &&
1526			    sshdr.sense_key == UNIT_ATTENTION &&
1527			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
1528				/* Device reset might occur several times,
1529				 * give it one more chance */
1530				if (--reset_retries > 0)
1531					continue;
1532		}
1533		retries--;
1534
1535	} while (the_result && retries);
1536
1537	if (the_result) {
1538		sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n");
1539		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
1540		return -EINVAL;
1541	}
1542
1543	sector_size = get_unaligned_be32(&buffer[8]);
1544	lba = get_unaligned_be64(&buffer[0]);
1545
1546	sd_read_protection_type(sdkp, buffer);
1547
1548	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
1549		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1550			"kernel compiled with support for large block "
1551			"devices.\n");
1552		sdkp->capacity = 0;
1553		return -EOVERFLOW;
1554	}
1555
1556	/* Logical blocks per physical block exponent */
1557	sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size;
1558
1559	/* Lowest aligned logical block */
1560	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
1561	blk_queue_alignment_offset(sdp->request_queue, alignment);
1562	if (alignment && sdkp->first_scan)
1563		sd_printk(KERN_NOTICE, sdkp,
1564			  "physical block alignment offset: %u\n", alignment);
1565
1566	if (buffer[14] & 0x80) { /* TPE */
1567		struct request_queue *q = sdp->request_queue;
1568
1569		sdkp->thin_provisioning = 1;
1570		q->limits.discard_granularity = sdkp->hw_sector_size;
1571		q->limits.max_discard_sectors = 0xffffffff;
1572
1573		if (buffer[14] & 0x40) /* TPRZ */
1574			q->limits.discard_zeroes_data = 1;
1575
1576		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1577	}
1578
1579	sdkp->capacity = lba + 1;
1580	return sector_size;
1581}
1582
1583static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1584						unsigned char *buffer)
1585{
1586	unsigned char cmd[16];
1587	struct scsi_sense_hdr sshdr;
1588	int sense_valid = 0;
1589	int the_result;
1590	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
1591	sector_t lba;
1592	unsigned sector_size;
1593
1594	do {
1595		cmd[0] = READ_CAPACITY;
1596		memset(&cmd[1], 0, 9);
1597		memset(buffer, 0, 8);
1598
1599		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1600					buffer, 8, &sshdr,
1601					SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1602
1603		if (media_not_present(sdkp, &sshdr))
1604			return -ENODEV;
1605
1606		if (the_result) {
1607			sense_valid = scsi_sense_valid(&sshdr);
1608			if (sense_valid &&
1609			    sshdr.sense_key == UNIT_ATTENTION &&
1610			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
1611				/* Device reset might occur several times,
1612				 * give it one more chance */
1613				if (--reset_retries > 0)
1614					continue;
1615		}
1616		retries--;
1617
1618	} while (the_result && retries);
1619
1620	if (the_result) {
1621		sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n");
1622		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
1623		return -EINVAL;
1624	}
1625
1626	sector_size = get_unaligned_be32(&buffer[4]);
1627	lba = get_unaligned_be32(&buffer[0]);
1628
1629	if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
1630		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1631			"kernel compiled with support for large block "
1632			"devices.\n");
1633		sdkp->capacity = 0;
1634		return -EOVERFLOW;
1635	}
1636
1637	sdkp->capacity = lba + 1;
1638	sdkp->hw_sector_size = sector_size;
1639	return sector_size;
1640}
1641
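/*
 * Prefer READ CAPACITY(16) when the host can issue 16-byte CDBs and
 * the device is SPC-3 or later or is formatted with protection
 * information: only the 16-byte variant reports the protection type
 * and capacities beyond 32 bits worth of blocks.
 */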
1642static int sd_try_rc16_first(struct scsi_device *sdp)
1643{
1644	if (sdp->host->max_cmd_len < 16)
1645		return 0;
1646	if (sdp->scsi_level > SCSI_SPC_2)
1647		return 1;
1648	if (scsi_device_protection(sdp))
1649		return 1;
1650	return 0;
1651}
1652
1653/*
1654 * read disk capacity
1655 */
1656static void
1657sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
1658{
1659	int sector_size;
1660	struct scsi_device *sdp = sdkp->device;
1661	sector_t old_capacity = sdkp->capacity;
1662
1663	if (sd_try_rc16_first(sdp)) {
1664		sector_size = read_capacity_16(sdkp, sdp, buffer);
1665		if (sector_size == -EOVERFLOW)
1666			goto got_data;
1667		if (sector_size == -ENODEV)
1668			return;
1669		if (sector_size < 0)
1670			sector_size = read_capacity_10(sdkp, sdp, buffer);
1671		if (sector_size < 0)
1672			return;
1673	} else {
1674		sector_size = read_capacity_10(sdkp, sdp, buffer);
1675		if (sector_size == -EOVERFLOW)
1676			goto got_data;
1677		if (sector_size < 0)
1678			return;
1679		if ((sizeof(sdkp->capacity) > 4) &&
1680		    (sdkp->capacity > 0xffffffffULL)) {
1681			int old_sector_size = sector_size;
1682			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
1683					"Trying to use READ CAPACITY(16).\n");
1684			sector_size = read_capacity_16(sdkp, sdp, buffer);
1685			if (sector_size < 0) {
1686				sd_printk(KERN_NOTICE, sdkp,
1687					"Using 0xffffffff as device size\n");
1688				sdkp->capacity = 1 + (sector_t) 0xffffffff;
1689				sector_size = old_sector_size;
1690				goto got_data;
1691			}
1692		}
1693	}
1694
1695	/* Some devices are known to return the total number of blocks,
1696	 * not the highest block number.  Some devices have versions
1697	 * which do this and others which do not.  Some devices we might
1698	 * suspect of doing this but we don't know for certain.
1699	 *
1700	 * If we know the reported capacity is wrong, decrement it.  If
1701	 * we can only guess, then assume the number of blocks is even
1702	 * (usually true but not always) and err on the side of lowering
1703	 * the capacity.
1704	 */
1705	if (sdp->fix_capacity ||
1706	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
1707		sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
1708				"from its reported value: %llu\n",
1709				(unsigned long long) sdkp->capacity);
1710		--sdkp->capacity;
1711	}
1712
1713got_data:
1714	if (sector_size == 0) {
1715		sector_size = 512;
1716		sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
1717			  "assuming 512.\n");
1718	}
1719
1720	if (sector_size != 512 &&
1721	    sector_size != 1024 &&
1722	    sector_size != 2048 &&
1723	    sector_size != 4096 &&
1724	    sector_size != 256) {
1725		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
1726			  sector_size);
1727		/*
1728		 * The user might want to re-format the drive with
1729		 * a supported sector size.  Once this happens, it
1730		 * would be relatively trivial to set the device up.
1731		 * For this reason, we leave the device in the table.
1732		 */
1733		sdkp->capacity = 0;
1734		/*
1735		 * set a bogus sector size so the normal read/write
1736		 * logic in the block layer will eventually refuse any
1737		 * request on this device without tripping over power
1738		 * of two sector size assumptions
1739		 */
1740		sector_size = 512;
1741	}
1742	blk_queue_logical_block_size(sdp->request_queue, sector_size);
1743
1744	{
1745		char cap_str_2[10], cap_str_10[10];
1746		u64 sz = (u64)sdkp->capacity << ilog2(sector_size);
1747
1748		string_get_size(sz, STRING_UNITS_2, cap_str_2,
1749				sizeof(cap_str_2));
1750		string_get_size(sz, STRING_UNITS_10, cap_str_10,
1751				sizeof(cap_str_10));
1752
1753		if (sdkp->first_scan || old_capacity != sdkp->capacity) {
1754			sd_printk(KERN_NOTICE, sdkp,
1755				  "%llu %d-byte logical blocks: (%s/%s)\n",
1756				  (unsigned long long)sdkp->capacity,
1757				  sector_size, cap_str_10, cap_str_2);
1758
1759			if (sdkp->hw_sector_size != sector_size)
1760				sd_printk(KERN_NOTICE, sdkp,
1761					  "%u-byte physical blocks\n",
1762					  sdkp->hw_sector_size);
1763		}
1764	}
1765
1766	/* Rescale capacity to 512-byte units */
1767	if (sector_size == 4096)
1768		sdkp->capacity <<= 3;
1769	else if (sector_size == 2048)
1770		sdkp->capacity <<= 2;
1771	else if (sector_size == 1024)
1772		sdkp->capacity <<= 1;
1773	else if (sector_size == 256)
1774		sdkp->capacity >>= 1;
1775
1776	blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size);
1777	sdkp->device->sector_size = sector_size;
1778}
1779
1780/* called with buffer of length 512 */
1781static inline int
1782sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
1783		 unsigned char *buffer, int len, struct scsi_mode_data *data,
1784		 struct scsi_sense_hdr *sshdr)
1785{
1786	return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
1787			       SD_TIMEOUT, SD_MAX_RETRIES, data,
1788			       sshdr);
1789}
1790
1791/*
1792 * read write protect setting, if possible - called only in sd_revalidate_disk()
1793 * called with buffer of length SD_BUF_SIZE
1794 */
1795static void
1796sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
1797{
1798	int res;
1799	struct scsi_device *sdp = sdkp->device;
1800	struct scsi_mode_data data;
1801	int old_wp = sdkp->write_prot;
1802
1803	set_disk_ro(sdkp->disk, 0);
1804	if (sdp->skip_ms_page_3f) {
1805		sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
1806		return;
1807	}
1808
1809	if (sdp->use_192_bytes_for_3f) {
1810		res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
1811	} else {
1812		/*
1813		 * First attempt: ask for all pages (0x3F), but only 4 bytes.
1814		 * We have to start carefully: some devices hang if we ask
1815		 * for more than is available.
1816		 */
1817		res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
1818
1819		/*
1820		 * Second attempt: ask for page 0.  When only page 0 is
1821		 * implemented, a request for page 3F may return Sense Key
1822		 * 5: Illegal Request, Sense Code 24: Invalid field in
1823		 * CDB.
1824		 */
1825		if (!scsi_status_is_good(res))
1826			res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
1827
1828		/*
1829		 * Third attempt: ask for 255 bytes, as we did earlier.
1830		 */
1831		if (!scsi_status_is_good(res))
1832			res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
1833					       &data, NULL);
1834	}
1835
1836	if (!scsi_status_is_good(res)) {
1837		sd_printk(KERN_WARNING, sdkp,
1838			  "Test WP failed, assume Write Enabled\n");
1839	} else {
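		/*
		 * WP is bit 7 of the device-specific parameter byte in the
		 * mode parameter header returned by MODE SENSE.
		 */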
1840		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
1841		set_disk_ro(sdkp->disk, sdkp->write_prot);
1842		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
1843			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
1844				  sdkp->write_prot ? "on" : "off");
1845			sd_printk(KERN_DEBUG, sdkp,
1846				  "Mode Sense: %02x %02x %02x %02x\n",
1847				  buffer[0], buffer[1], buffer[2], buffer[3]);
1848		}
1849	}
1850}
1851
1852/*
1853 * sd_read_cache_type - called only from sd_revalidate_disk()
1854 * called with buffer of length SD_BUF_SIZE
1855 */
1856static void
1857sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1858{
1859	int len = 0, res;
1860	struct scsi_device *sdp = sdkp->device;
1861
1862	int dbd;
1863	int modepage;
1864	struct scsi_mode_data data;
1865	struct scsi_sense_hdr sshdr;
1866	int old_wce = sdkp->WCE;
1867	int old_rcd = sdkp->RCD;
1868	int old_dpofua = sdkp->DPOFUA;
1869
1870	if (sdp->skip_ms_page_8)
1871		goto defaults;
1872
1873	if (sdp->type == TYPE_RBC) {
1874		modepage = 6;
1875		dbd = 8;
1876	} else {
1877		modepage = 8;
1878		dbd = 0;
1879	}
1880
1881	/* cautiously ask */
1882	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
1883
1884	if (!scsi_status_is_good(res))
1885		goto bad_sense;
1886
1887	if (!data.header_length) {
1888		modepage = 6;
1889		sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
1890	}
1891
1892	/* that went OK, now ask for the proper length */
1893	len = data.length;
1894
1895	/*
1896	 * We're only interested in the first three bytes, actually.
1897	 * But the data cache page is defined for the first 20.
1898	 */
1899	if (len < 3)
1900		goto bad_sense;
1901	if (len > 20)
1902		len = 20;
1903
1904	/* Take headers and block descriptors into account */
1905	len += data.header_length + data.block_descriptor_length;
1906	if (len > SD_BUF_SIZE)
1907		goto bad_sense;
1908
1909	/* Get the data */
1910	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
1911
1912	if (scsi_status_is_good(res)) {
1913		int offset = data.header_length + data.block_descriptor_length;
1914
1915		if (offset >= SD_BUF_SIZE - 2) {
1916			sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
1917			goto defaults;
1918		}
1919
1920		if ((buffer[offset] & 0x3f) != modepage) {
1921			sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
1922			goto defaults;
1923		}
1924
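		/*
		 * On the Caching mode page (8), byte 2 carries WCE (bit 2)
		 * and RCD (bit 0).  On the RBC device parameters page (6),
		 * bit 0 of byte 2 disables the write cache, so WCE is its
		 * inverse and there is no read-cache-disable bit.
		 */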
1925		if (modepage == 8) {
1926			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
1927			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
1928		} else {
1929			sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
1930			sdkp->RCD = 0;
1931		}
1932
1933		sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
1934		if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
1935			sd_printk(KERN_NOTICE, sdkp,
1936				  "Uses READ/WRITE(6), disabling FUA\n");
1937			sdkp->DPOFUA = 0;
1938		}
1939
1940		if (sdkp->first_scan || old_wce != sdkp->WCE ||
1941		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
1942			sd_printk(KERN_NOTICE, sdkp,
1943				  "Write cache: %s, read cache: %s, %s\n",
1944				  sdkp->WCE ? "enabled" : "disabled",
1945				  sdkp->RCD ? "disabled" : "enabled",
1946				  sdkp->DPOFUA ? "supports DPO and FUA"
1947				  : "doesn't support DPO or FUA");
1948
1949		return;
1950	}
1951
1952bad_sense:
1953	if (scsi_sense_valid(&sshdr) &&
1954	    sshdr.sense_key == ILLEGAL_REQUEST &&
1955	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
1956		/* Invalid field in CDB */
1957		sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
1958	else
1959		sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
1960
1961defaults:
1962	sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
1963	sdkp->WCE = 0;
1964	sdkp->RCD = 0;
1965	sdkp->DPOFUA = 0;
1966}
1967
1968/*
1969 * The ATO bit indicates whether the DIF application tag is available
1970 * for use by the operating system.
1971 */
1972static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1973{
1974	int res, offset;
1975	struct scsi_device *sdp = sdkp->device;
1976	struct scsi_mode_data data;
1977	struct scsi_sense_hdr sshdr;
1978
1979	if (sdp->type != TYPE_DISK)
1980		return;
1981
1982	if (sdkp->protection_type == 0)
1983		return;
1984
1985	res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
1986			      SD_MAX_RETRIES, &data, &sshdr);
1987
1988	if (!scsi_status_is_good(res) || !data.header_length ||
1989	    data.length < 6) {
1990		sd_printk(KERN_WARNING, sdkp,
1991			  "getting Control mode page failed, assume no ATO\n");
1992
1993		if (scsi_sense_valid(&sshdr))
1994			sd_print_sense_hdr(sdkp, &sshdr);
1995
1996		return;
1997	}
1998
1999	offset = data.header_length + data.block_descriptor_length;
2000
2001	if ((buffer[offset] & 0x3f) != 0x0a) {
2002		sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
2003		return;
2004	}
2005
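	/* ATO is bit 7 of byte 5 in the Control mode page. */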
2006	if ((buffer[offset + 5] & 0x80) == 0)
2007		return;
2008
2009	sdkp->ATO = 1;
2010
2011	return;
2012}
2013
2014/**
2015 * sd_read_block_limits - Query disk device for preferred I/O sizes.
2016 * @sdkp: disk to query
2017 */
2018static void sd_read_block_limits(struct scsi_disk *sdkp)
2019{
2020	struct request_queue *q = sdkp->disk->queue;
2021	unsigned int sector_sz = sdkp->device->sector_size;
2022	const int vpd_len = 64;
2023	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
2024
2025	if (!buffer ||
2026	    /* Block Limits VPD */
2027	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
2028		goto out;
2029
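	/*
	 * Bytes 6-7 of the Block Limits VPD page hold the optimal transfer
	 * length granularity and bytes 12-15 the optimal transfer length,
	 * both in logical blocks; scale them to bytes for the queue limits.
	 */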
2030	blk_queue_io_min(sdkp->disk->queue,
2031			 get_unaligned_be16(&buffer[6]) * sector_sz);
2032	blk_queue_io_opt(sdkp->disk->queue,
2033			 get_unaligned_be32(&buffer[12]) * sector_sz);
2034
2035	/* Thin provisioning enabled and page length indicates TP support */
2036	if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
2037		unsigned int lba_count, desc_count, granularity;
2038
2039		lba_count = get_unaligned_be32(&buffer[20]);
2040		desc_count = get_unaligned_be32(&buffer[24]);
2041
2042		if (lba_count) {
2043			q->limits.max_discard_sectors =
2044				lba_count * sector_sz >> 9;
2045
2046			if (desc_count)
2047				sdkp->unmap = 1;
2048		}
2049
2050		granularity = get_unaligned_be32(&buffer[28]);
2051
2052		if (granularity)
2053			q->limits.discard_granularity = granularity * sector_sz;
2054
2055		if (buffer[32] & 0x80)
2056			q->limits.discard_alignment =
2057				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
2058	}
2059
2060 out:
2061	kfree(buffer);
2062}
2063
2064/**
2065 * sd_read_block_characteristics - Query block dev. characteristics
2066 * @sdkp: disk to query
2067 */
2068static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2069{
2070	unsigned char *buffer;
2071	u16 rot;
2072	const int vpd_len = 64;
2073
2074	buffer = kmalloc(vpd_len, GFP_KERNEL);
2075
2076	if (!buffer ||
2077	    /* Block Device Characteristics VPD */
2078	    scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
2079		goto out;
2080
2081	rot = get_unaligned_be16(&buffer[4]);
2082
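	/* A MEDIUM ROTATION RATE of 1 means a non-rotating (solid state) medium. */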
2083	if (rot == 1)
2084		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
2085
2086 out:
2087	kfree(buffer);
2088}
2089
2090static int sd_try_extended_inquiry(struct scsi_device *sdp)
2091{
2092	/*
2093	 * Although VPD inquiries can go to SCSI-2 type devices,
2094	 * some USB ones crash on receiving them, and the pages
2095	 * we currently ask for are for SPC-3 and beyond
2096	 */
2097	if (sdp->scsi_level > SCSI_SPC_2)
2098		return 1;
2099	return 0;
2100}
2101
2102/**
2103 *	sd_revalidate_disk - called the first time a new disk is seen,
2104 *	performs disk spin up, read_capacity, etc.
2105 *	@disk: struct gendisk we care about
2106 **/
2107static int sd_revalidate_disk(struct gendisk *disk)
2108{
2109	struct scsi_disk *sdkp = scsi_disk(disk);
2110	struct scsi_device *sdp = sdkp->device;
2111	unsigned char *buffer;
2112	unsigned flush = 0;
2113
2114	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
2115				      "sd_revalidate_disk\n"));
2116
2117	/*
2118	 * If the device is offline, don't try and read capacity or any
2119	 * of the other niceties.
2120	 */
2121	if (!scsi_device_online(sdp))
2122		goto out;
2123
2124	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
2125	if (!buffer) {
2126		sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
2127			  "allocation failure.\n");
2128		goto out;
2129	}
2130
2131	sd_spinup_disk(sdkp);
2132
2133	/*
2134	 * Without media there is no reason to ask; moreover, some devices
2135	 * react badly if we do.
2136	 */
2137	if (sdkp->media_present) {
2138		sd_read_capacity(sdkp, buffer);
2139
2140		if (sd_try_extended_inquiry(sdp)) {
2141			sd_read_block_limits(sdkp);
2142			sd_read_block_characteristics(sdkp);
2143		}
2144
2145		sd_read_write_protect_flag(sdkp, buffer);
2146		sd_read_cache_type(sdkp, buffer);
2147		sd_read_app_tag_own(sdkp, buffer);
2148	}
2149
2150	sdkp->first_scan = 0;
2151
2152	/*
2153	 * We now have all cache related info, determine how we deal
2154	 * with flush requests.
2155	 */
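	/*
	 * A volatile write-back cache (WCE) means completed writes may still
	 * sit in cache, so advertise REQ_FLUSH; if the device also supports
	 * DPO/FUA, REQ_FUA writes can force data to the medium directly.
	 */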
2156	if (sdkp->WCE) {
2157		flush |= REQ_FLUSH;
2158		if (sdkp->DPOFUA)
2159			flush |= REQ_FUA;
2160	}
2161
2162	blk_queue_flush(sdkp->disk->queue, flush);
2163
2164	set_capacity(disk, sdkp->capacity);
2165	kfree(buffer);
2166
2167 out:
2168	return 0;
2169}
2170
2171/**
2172 *	sd_unlock_native_capacity - unlock native capacity
2173 *	@disk: struct gendisk to set capacity for
2174 *
2175 *	The block layer calls this function if it detects that partitions
2176 *	on @disk reach beyond the end of the device.  If the SCSI host
2177 *	implements the ->unlock_native_capacity() method, it is invoked
2178 *	to give the driver a chance to adjust the device capacity.
2179 *
2180 *	CONTEXT:
2181 *	Defined by block layer.  Might sleep.
2182 */
2183static void sd_unlock_native_capacity(struct gendisk *disk)
2184{
2185	struct scsi_device *sdev = scsi_disk(disk)->device;
2186
2187	if (sdev->host->hostt->unlock_native_capacity)
2188		sdev->host->hostt->unlock_native_capacity(sdev);
2189}
2190
2191/**
2192 *	sd_format_disk_name - format disk name
2193 *	@prefix: name prefix - ie. "sd" for SCSI disks
2194 *	@index: index of the disk to format name for
2195 *	@buf: output buffer
2196 *	@buflen: length of the output buffer
2197 *
2198 *	SCSI disk names start at sda.  The 26th device is sdz and the
2199 *	27th is sdaa.  The last name with a two-letter suffix is sdzz,
2200 *	which is followed by sdaaa.
2201 *
2202 *	This is basically base-26 counting with one extra 'nil' entry
2203 *	for every digit except the first, and can be computed like a
2204 *	base-26 conversion with the index decremented by one after
2205 *	each digit is produced.
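 *	For example, index 0 maps to sda, 25 to sdz, 26 to sdaa,
 *	701 to sdzz and 702 to sdaaa.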
2206 *
2207 *	CONTEXT:
2208 *	Don't care.
2209 *
2210 *	RETURNS:
2211 *	0 on success, -errno on failure.
2212 */
2213static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
2214{
2215	const int base = 'z' - 'a' + 1;
2216	char *begin = buf + strlen(prefix);
2217	char *end = buf + buflen;
2218	char *p;
2219	int unit;
2220
2221	p = end - 1;
2222	*p = '\0';
2223	unit = base;
2224	do {
2225		if (p == begin)
2226			return -EINVAL;
2227		*--p = 'a' + (index % unit);
2228		index = (index / unit) - 1;
2229	} while (index >= 0);
2230
2231	memmove(begin, p, end - p);
2232	memcpy(buf, prefix, strlen(prefix));
2233
2234	return 0;
2235}
2236
2237/*
2238 * The asynchronous part of sd_probe
2239 */
2240static void sd_probe_async(void *data, async_cookie_t cookie)
2241{
2242	struct scsi_disk *sdkp = data;
2243	struct scsi_device *sdp;
2244	struct gendisk *gd;
2245	u32 index;
2246	struct device *dev;
2247
2248	sdp = sdkp->device;
2249	gd = sdkp->disk;
2250	index = sdkp->index;
2251	dev = &sdp->sdev_gendev;
2252
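	/*
	 * Index bits 7:4 select one of the 16 sd majors, bits 3:0 the
	 * 16-minor slot within that major; any higher bits end up in the
	 * extended dev_t minor space (GENHD_FL_EXT_DEVT).
	 */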
2253	if (index < SD_MAX_DISKS) {
2254		gd->major = sd_major((index & 0xf0) >> 4);
2255		gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
2256		gd->minors = SD_MINORS;
2257	}
2258	gd->fops = &sd_fops;
2259	gd->private_data = &sdkp->driver;
2260	gd->queue = sdkp->device->request_queue;
2261
2262	/* defaults, until the device tells us otherwise */
2263	sdp->sector_size = 512;
2264	sdkp->capacity = 0;
2265	sdkp->media_present = 1;
2266	sdkp->write_prot = 0;
2267	sdkp->WCE = 0;
2268	sdkp->RCD = 0;
2269	sdkp->ATO = 0;
2270	sdkp->first_scan = 1;
2271
2272	sd_revalidate_disk(gd);
2273
2274	blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
2275	blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);
2276
2277	gd->driverfs_dev = &sdp->sdev_gendev;
2278	gd->flags = GENHD_FL_EXT_DEVT;
2279	if (sdp->removable)
2280		gd->flags |= GENHD_FL_REMOVABLE;
2281
2282	add_disk(gd);
2283	sd_dif_config_host(sdkp);
2284
2285	sd_revalidate_disk(gd);
2286
2287	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2288		  sdp->removable ? "removable " : "");
2289	scsi_autopm_put_device(sdp);
2290	put_device(&sdkp->dev);
2291}
2292
2293/**
2294 *	sd_probe - called during driver initialization and whenever a
2295 *	new scsi device is attached to the system. It is called once
2296 *	for each scsi device (not just disks) present.
2297 *	@dev: pointer to device object
2298 *
2299 *	Returns 0 if successful (or not interested in this scsi device
2300 *	(e.g. scanner)); a negative errno when there is an error.
2301 *
2302 *	Note: this function is invoked from the scsi mid-level.
2303 *	This function sets up the mapping between a given
2304 *	<host,channel,id,lun> (found in sdp) and new device name
2305 *	(e.g. /dev/sda). More precisely it is the block device major
2306 *	and minor number that is chosen here.
2307 *
2308 *	Assume sd_probe is not re-entrant (for the time being).
2309 *	Also think about sd_probe() and sd_remove() running coincidentally.
2310 **/
2311static int sd_probe(struct device *dev)
2312{
2313	struct scsi_device *sdp = to_scsi_device(dev);
2314	struct scsi_disk *sdkp;
2315	struct gendisk *gd;
2316	int index;
2317	int error;
2318
2319	error = -ENODEV;
2320	if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
2321		goto out;
2322
2323	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
2324					"sd_attach\n"));
2325
2326	error = -ENOMEM;
2327	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
2328	if (!sdkp)
2329		goto out;
2330
2331	gd = alloc_disk(SD_MINORS);
2332	if (!gd)
2333		goto out_free;
2334
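	/*
	 * ida_get_new() may return -EAGAIN if the node preallocated by
	 * ida_pre_get() was consumed by a concurrent allocation, so
	 * preallocate and retry until it succeeds or fails for real.
	 */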
2335	do {
2336		if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
2337			goto out_put;
2338
2339		spin_lock(&sd_index_lock);
2340		error = ida_get_new(&sd_index_ida, &index);
2341		spin_unlock(&sd_index_lock);
2342	} while (error == -EAGAIN);
2343
2344	if (error)
2345		goto out_put;
2346
2347	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
2348	if (error)
2349		goto out_free_index;
2350
2351	sdkp->device = sdp;
2352	sdkp->driver = &sd_template;
2353	sdkp->disk = gd;
2354	sdkp->index = index;
2355	atomic_set(&sdkp->openers, 0);
2356	sdkp->previous_state = 1;
2357
2358	if (!sdp->request_queue->rq_timeout) {
2359		if (sdp->type != TYPE_MOD)
2360			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
2361		else
2362			blk_queue_rq_timeout(sdp->request_queue,
2363					     SD_MOD_TIMEOUT);
2364	}
2365
2366	device_initialize(&sdkp->dev);
2367	sdkp->dev.parent = dev;
2368	sdkp->dev.class = &sd_disk_class;
2369	dev_set_name(&sdkp->dev, "%s", dev_name(dev));
2370
2371	if (device_add(&sdkp->dev))
2372		goto out_free_index;
2373
2374	get_device(dev);
2375	dev_set_drvdata(dev, sdkp);
2376
2377	get_device(&sdkp->dev);	/* prevent release before async_schedule */
2378	async_schedule(sd_probe_async, sdkp);
2379
2380	return 0;
2381
2382 out_free_index:
2383	spin_lock(&sd_index_lock);
2384	ida_remove(&sd_index_ida, index);
2385	spin_unlock(&sd_index_lock);
2386 out_put:
2387	put_disk(gd);
2388 out_free:
2389	kfree(sdkp);
2390 out:
2391	return error;
2392}
2393
2394/**
2395 *	sd_remove - called whenever a scsi disk (previously recognized by
2396 *	sd_probe) is detached from the system. It is called (potentially
2397 *	multiple times) during sd module unload.
2398 *	@dev: pointer to device object
2399 *
2400 *	Note: this function is invoked from the scsi mid-level.
2401 *	This function potentially frees up a device name (e.g. /dev/sdc)
2402 *	that could be re-used by a subsequent sd_probe().
2403 *	This function is not called when the built-in sd driver is "exit-ed".
2404 **/
2405static int sd_remove(struct device *dev)
2406{
2407	struct scsi_disk *sdkp;
2408
2409	sdkp = dev_get_drvdata(dev);
2410	scsi_autopm_get_device(sdkp->device);
2411
2412	async_synchronize_full();
2413	blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
2414	blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
2415	device_del(&sdkp->dev);
2416	del_gendisk(sdkp->disk);
2417	sd_shutdown(dev);
2418
2419	mutex_lock(&sd_ref_mutex);
2420	dev_set_drvdata(dev, NULL);
2421	put_device(&sdkp->dev);
2422	mutex_unlock(&sd_ref_mutex);
2423
2424	return 0;
2425}
2426
2427/**
2428 *	scsi_disk_release - Called to free the scsi_disk structure
2429 *	@dev: pointer to embedded class device
2430 *
2431 *	sd_ref_mutex must be held entering this routine.  Because it is
2432 *	called on last put, you should always use the scsi_disk_get()
2433 *	and scsi_disk_put() helpers, which manipulate the mutex directly
2434 *	and never do a direct put_device.
2435 **/
2436static void scsi_disk_release(struct device *dev)
2437{
2438	struct scsi_disk *sdkp = to_scsi_disk(dev);
2439	struct gendisk *disk = sdkp->disk;
2440
2441	spin_lock(&sd_index_lock);
2442	ida_remove(&sd_index_ida, sdkp->index);
2443	spin_unlock(&sd_index_lock);
2444
2445	disk->private_data = NULL;
2446	put_disk(disk);
2447	put_device(&sdkp->device->sdev_gendev);
2448
2449	kfree(sdkp);
2450}
2451
2452static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
2453{
2454	unsigned char cmd[6] = { START_STOP };	/* START_VALID */
2455	struct scsi_sense_hdr sshdr;
2456	struct scsi_device *sdp = sdkp->device;
2457	int res;
2458
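	/*
	 * Byte 4 of START STOP UNIT: bit 0 is the START bit, bits 7:4 the
	 * POWER CONDITION field (1 = ACTIVE, 3 = STANDBY).
	 */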
2459	if (start)
2460		cmd[4] |= 1;	/* START */
2461
2462	if (sdp->start_stop_pwr_cond)
2463		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */
2464
2465	if (!scsi_device_online(sdp))
2466		return -ENODEV;
2467
2468	res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
2469			       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
2470	if (res) {
2471		sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
2472		sd_print_result(sdkp, res);
2473		if (driver_byte(res) & DRIVER_SENSE)
2474			sd_print_sense_hdr(sdkp, &sshdr);
2475	}
2476
2477	return res;
2478}
2479
2480/*
2481 * Send a SYNCHRONIZE CACHE instruction down to the device through
2482 * the normal SCSI command structure.  Wait for the command to
2483 * complete.
2484 */
2485static void sd_shutdown(struct device *dev)
2486{
2487	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
2488
2489	if (!sdkp)
2490		return;         /* this can happen */
2491
2492	if (sdkp->WCE) {
2493		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
2494		sd_sync_cache(sdkp);
2495	}
2496
2497	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
2498		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
2499		sd_start_stop_device(sdkp, 0);
2500	}
2501
2502	scsi_disk_put(sdkp);
2503}
2504
2505static int sd_suspend(struct device *dev, pm_message_t mesg)
2506{
2507	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
2508	int ret = 0;
2509
2510	if (!sdkp)
2511		return 0;	/* this can happen */
2512
2513	if (sdkp->WCE) {
2514		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
2515		ret = sd_sync_cache(sdkp);
2516		if (ret)
2517			goto done;
2518	}
2519
2520	if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) {
2521		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
2522		ret = sd_start_stop_device(sdkp, 0);
2523	}
2524
2525done:
2526	scsi_disk_put(sdkp);
2527	return ret;
2528}
2529
2530static int sd_resume(struct device *dev)
2531{
2532	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
2533	int ret = 0;
2534
2535	if (!sdkp->device->manage_start_stop)
2536		goto done;
2537
2538	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
2539	ret = sd_start_stop_device(sdkp, 1);
2540
2541done:
2542	scsi_disk_put(sdkp);
2543	return ret;
2544}
2545
2546/**
2547 *	init_sd - entry point for this driver (both when built in and
2548 *	when loaded as a module).
2549 *
2550 *	Note: this function registers this driver with the scsi mid-level.
2551 **/
2552static int __init init_sd(void)
2553{
2554	int majors = 0, i, err;
2555
2556	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
2557
2558	for (i = 0; i < SD_MAJORS; i++)
2559		if (register_blkdev(sd_major(i), "sd") == 0)
2560			majors++;
2561
2562	if (!majors)
2563		return -ENODEV;
2564
2565	err = class_register(&sd_disk_class);
2566	if (err)
2567		goto err_out;
2568
2569	err = scsi_register_driver(&sd_template.gendrv);
2570	if (err)
2571		goto err_out_class;
2572
2573	sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
2574					 0, 0, NULL);
2575	if (!sd_cdb_cache) {
2576		printk(KERN_ERR "sd: can't init extended cdb cache\n");
2577		goto err_out_class;
2578	}
2579
2580	sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
2581	if (!sd_cdb_pool) {
2582		printk(KERN_ERR "sd: can't init extended cdb pool\n");
2583		goto err_out_cache;
2584	}
2585
2586	return 0;
2587
2588err_out_cache:
2589	kmem_cache_destroy(sd_cdb_cache);
2590
2591err_out_class:
2592	class_unregister(&sd_disk_class);
2593err_out:
2594	for (i = 0; i < SD_MAJORS; i++)
2595		unregister_blkdev(sd_major(i), "sd");
2596	return err;
2597}
2598
2599/**
2600 *	exit_sd - exit point for this driver (when it is a module).
2601 *
2602 *	Note: this function unregisters this driver from the scsi mid-level.
2603 **/
2604static void __exit exit_sd(void)
2605{
2606	int i;
2607
2608	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
2609
2610	mempool_destroy(sd_cdb_pool);
2611	kmem_cache_destroy(sd_cdb_cache);
2612
2613	scsi_unregister_driver(&sd_template.gendrv);
2614	class_unregister(&sd_disk_class);
2615
2616	for (i = 0; i < SD_MAJORS; i++)
2617		unregister_blkdev(sd_major(i), "sd");
2618}
2619
2620module_init(init_sd);
2621module_exit(exit_sd);
2622
2623static void sd_print_sense_hdr(struct scsi_disk *sdkp,
2624			       struct scsi_sense_hdr *sshdr)
2625{
2626	sd_printk(KERN_INFO, sdkp, "");
2627	scsi_show_sense_hdr(sshdr);
2628	sd_printk(KERN_INFO, sdkp, "");
2629	scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
2630}
2631
2632static void sd_print_result(struct scsi_disk *sdkp, int result)
2633{
2634	sd_printk(KERN_INFO, sdkp, "");
2635	scsi_show_result(result);
2636}
2637
2638