scsi_lib.c revision 8a78362c4eefc1deddbefe2c7f38aabbc2429d6b
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
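
/*
 * Example (illustrative note, not taken verbatim from any caller): the
 * usual way to end up here is a low-level driver's ->queuecommand()
 * returning one of the SCSI_MLQUEUE_*_BUSY codes.  scsi_dispatch_cmd()
 * then calls scsi_queue_insert(), which arrives in __scsi_queue_insert()
 * with unbusy == 1, arms the matching *_blocked counter above and
 * requeues the request at the head of the queue.
 */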

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request's cmd_flags
 * @resid:	optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
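
/*
 * Example (illustrative sketch only; the CDB, timeout and retry values
 * are assumptions, not taken from this file): spinning up a disk with a
 * START STOP UNIT issued through scsi_execute():
 *
 *	unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
 *	int result;
 *
 *	result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL,
 *			      30 * HZ, 3, 0, NULL);
 *	// on failure, host_byte(result)/driver_byte(result) say why
 */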

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
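
/*
 * Example (illustrative sketch; opcode, length, timeout and retries are
 * assumptions): reading a standard INQUIRY with sense collection:
 *
 *	unsigned char inq[36];
 *	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, sizeof(inq), 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, inq,
 *				  sizeof(inq), &sshdr, 30 * HZ, 3, NULL);
 *	// on failure, scsi_sense_valid(&sshdr) tells whether
 *	// sshdr.sense_key/asc/ascq describe the reason
 */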

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
				!test_bit(QUEUE_FLAG_REENTER,
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
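
/*
 * Illustrative mapping, derived from the helper above (assuming
 * SCSI_MAX_SG_SEGMENTS > 128 so every pool exists): nents 1-8 selects
 * index 0 ("sgpool-8"), 9-16 index 1, 17-32 index 2, 33-64 index 3 and
 * 65-128 index 4 - i.e. the smallest power-of-two pool that fits.
 */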

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	BUG_ON(blk_bidi_rq(req)); /* bidi is not supported for !blk_pc_request yet */

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
		if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(cmd->request)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(cmd->request);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(cmd->request,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI
	 * command that does not transfer data, in which case they may
	 * optionally submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
1092{
1093	struct scsi_cmnd *cmd;
1094	int ret = scsi_prep_state_check(sdev, req);
1095
1096	if (ret != BLKPREP_OK)
1097		return ret;
1098
1099	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1100			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1101		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1102		if (ret != BLKPREP_OK)
1103			return ret;
1104	}
1105
1106	/*
1107	 * Filesystem requests must transfer data.
1108	 */
1109	BUG_ON(!req->nr_phys_segments);
1110
1111	cmd = scsi_get_cmd_from_req(sdev, req);
1112	if (unlikely(!cmd))
1113		return BLKPREP_DEFER;
1114
1115	memset(cmd->cmnd, 0, BLK_MAX_CDB);
1116	return scsi_init_io(cmd, GFP_ATOMIC);
1117}
1118EXPORT_SYMBOL(scsi_setup_fs_cmnd);
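
/*
 * Example (illustrative sketch, loosely modelled on how sd builds its
 * CDBs; the names here are made up): a ULD prep_fn first lets the
 * midlayer set up the command and scatterlist, then fills in the CDB:
 *
 *	static int example_fs_prep(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		struct scsi_cmnd *cmd;
 *		int ret = scsi_setup_fs_cmnd(sdev, rq);
 *
 *		if (ret != BLKPREP_OK)
 *			return scsi_prep_return(q, rq, ret);
 *		cmd = rq->special;
 *		// ... fill cmd->cmnd[] with a READ/WRITE CDB here ...
 *		return scsi_prep_return(q, rq, BLKPREP_OK);
 *	}
 */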

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we plug it here when no
		 * outstanding command will restart it for us.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}


/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry)) {
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
			return 0;
		}
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}
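
/*
 * Note (illustrative, not stated in this file): request stacking drivers
 * such as dm-multipath reach scsi_lld_busy() through blk_lld_busy() on
 * the underlying queue, which is why the hook is registered with
 * blk_queue_lld_busy() in scsi_alloc_queue() below.
 */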

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
				 __func__);
		BUG();
	}

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = blk_peek_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
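
/*
 * For example (derived from the checks above): a host flagged for
 * unchecked ISA DMA bounces everything above the ISA limit
 * (BLK_BOUNCE_ISA), an IOMMU-backed platform gets BLK_BOUNCE_ANY, and
 * otherwise the DMA device's dma_mask (or 32 bits by default) is used.
 */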

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	/* New queue, no concurrency on queue_flags */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
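
/*
 * Example (illustrative sketch; the firmware-reset helper is made up):
 * a LLD typically brackets an internal reset or resource shortage with
 * these calls:
 *
 *	scsi_block_requests(shost);
 *	example_reset_firmware(shost);	// hypothetical LLD-internal step
 *	scsi_unblock_requests(shost);
 *
 * scsi_unblock_requests() also reruns all device queues on the host, so
 * commands held back while blocked get dispatched again.
 */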

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error.  On success, @data->header_length holds the
 *	header offset (either 4 or 8, depending on whether a six or
 *	ten byte command was issued).
 */
1876int
1877scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1878		  unsigned char *buffer, int len, int timeout, int retries,
1879		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1880{
1881	unsigned char cmd[12];
1882	int use_10_for_ms;
1883	int header_length;
1884	int result;
1885	struct scsi_sense_hdr my_sshdr;
1886
1887	memset(data, 0, sizeof(*data));
1888	memset(&cmd[0], 0, 12);
1889	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
1890	cmd[2] = modepage;
1891
1892	/* caller might not be interested in sense, but we need it */
1893	if (!sshdr)
1894		sshdr = &my_sshdr;
1895
1896 retry:
1897	use_10_for_ms = sdev->use_10_for_ms;
1898
1899	if (use_10_for_ms) {
1900		if (len < 8)
1901			len = 8;
1902
1903		cmd[0] = MODE_SENSE_10;
1904		cmd[8] = len;
1905		header_length = 8;
1906	} else {
1907		if (len < 4)
1908			len = 4;
1909
1910		cmd[0] = MODE_SENSE;
1911		cmd[4] = len;
1912		header_length = 4;
1913	}
1914
1915	memset(buffer, 0, len);
1916
1917	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1918				  sshdr, timeout, retries, NULL);
1919
1920	/* If the command failed with ILLEGAL REQUEST, check that the
1921	 * sense data blames the command opcode itself before falling
1922	 * back to the six byte form: MODE SENSE can also return
1923	 * ILLEGAL REQUEST when the mode page isn't supported. */
1924
1925	if (use_10_for_ms && !scsi_status_is_good(result) &&
1926	    (driver_byte(result) & DRIVER_SENSE)) {
1927		if (scsi_sense_valid(sshdr)) {
1928			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1929			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1930				/*
1931				 * Invalid command operation code
1932				 */
1933				sdev->use_10_for_ms = 0;
1934				goto retry;
1935			}
1936		}
1937	}
1938
1939	if (scsi_status_is_good(result)) {
1940		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1941			     (modepage == 6 || modepage == 8))) {
1942			/* Initio breakage? */
1943			header_length = 0;
1944			data->length = 13;
1945			data->medium_type = 0;
1946			data->device_specific = 0;
1947			data->longlba = 0;
1948			data->block_descriptor_length = 0;
1949		} else if (use_10_for_ms) {
1950			data->length = buffer[0]*256 + buffer[1] + 2;
1951			data->medium_type = buffer[2];
1952			data->device_specific = buffer[3];
1953			data->longlba = buffer[4] & 0x01;
1954			data->block_descriptor_length = buffer[6]*256
1955				+ buffer[7];
1956		} else {
1957			data->length = buffer[0] + 1;
1958			data->medium_type = buffer[1];
1959			data->device_specific = buffer[2];
1960			data->block_descriptor_length = buffer[3];
1961		}
1962		data->header_length = header_length;
1963	}
1964
1965	return result;
1966}
1967EXPORT_SYMBOL(scsi_mode_sense);
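
/*
 * Editor's note -- usage sketch, not part of scsi_lib.c.  Fetch the
 * caching mode page and locate the page data behind the header and
 * any block descriptors; buffer size, timeout and retries are
 * illustrative.
 *
 *	unsigned char buffer[512];
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0x08, 0x08, buffer, sizeof(buffer),
 *			      30 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res)) {
 *		unsigned char *page = buffer + data.header_length +
 *				      data.block_descriptor_length;
 *		(page[0] & 0x3f is the page code, page[1] its length)
 *	}
 */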
1968
1969/**
1970 *	scsi_test_unit_ready - test if unit is ready
1971 *	@sdev:	scsi device to change the state of.
1972 *	@timeout: command timeout
1973 *	@retries: number of retries before failing
1974 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
1975 *		returning sense. Make sure that this is cleared before passing
1976 *		in.
1977 *
1978 *	Returns zero if the unit is ready, or the command result if the
1979 *	TUR failed.  For removable media, NOT_READY or UNIT_ATTENTION
1980 *	sense is translated to success, with the ->changed flag set.
1981 **/
1982int
1983scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
1984		     struct scsi_sense_hdr *sshdr_external)
1985{
1986	char cmd[] = {
1987		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1988	};
1989	struct scsi_sense_hdr *sshdr;
1990	int result;
1991
1992	if (!sshdr_external)
1993		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1994	else
1995		sshdr = sshdr_external;
1996
1997	/* try to eat the UNIT_ATTENTION if there are enough retries */
1998	do {
1999		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2000					  timeout, retries, NULL);
2001		if (sdev->removable && scsi_sense_valid(sshdr) &&
2002		    sshdr->sense_key == UNIT_ATTENTION)
2003			sdev->changed = 1;
2004	} while (scsi_sense_valid(sshdr) &&
2005		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2006
2007	if (!sshdr)
2008		/* could not allocate sense buffer, so can't process it */
2009		return result;
2010
2011	if (sdev->removable && scsi_sense_valid(sshdr) &&
2012	    (sshdr->sense_key == UNIT_ATTENTION ||
2013	     sshdr->sense_key == NOT_READY)) {
2014		sdev->changed = 1;
2015		result = 0;
2016	}
2017	if (!sshdr_external)
2018		kfree(sshdr);
2019	return result;
2020}
2021EXPORT_SYMBOL(scsi_test_unit_ready);
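
/*
 * Editor's note -- usage sketch, not part of scsi_lib.c.  Media
 * polling for a removable device; the timeout and retry count are
 * illustrative.
 *
 *	struct scsi_sense_hdr sshdr;
 *
 *	memset(&sshdr, 0, sizeof(sshdr));	(must be cleared, see above)
 *	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0 &&
 *	    sdev->changed)
 *		... medium was changed, revalidate it ...
 */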
2022
2023/**
2024 *	scsi_device_set_state - Take the given device through the device state model.
2025 *	@sdev:	scsi device to change the state of.
2026 *	@state:	state to change to.
2027 *
2028 *	Returns zero if successful, or -EINVAL if the requested
2029 *	transition is illegal.
2030 */
2031int
2032scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2033{
2034	enum scsi_device_state oldstate = sdev->sdev_state;
2035
2036	if (state == oldstate)
2037		return 0;
2038
2039	switch (state) {
2040	case SDEV_CREATED:
2041		switch (oldstate) {
2042		case SDEV_CREATED_BLOCK:
2043			break;
2044		default:
2045			goto illegal;
2046		}
2047		break;
2048
2049	case SDEV_RUNNING:
2050		switch (oldstate) {
2051		case SDEV_CREATED:
2052		case SDEV_OFFLINE:
2053		case SDEV_QUIESCE:
2054		case SDEV_BLOCK:
2055			break;
2056		default:
2057			goto illegal;
2058		}
2059		break;
2060
2061	case SDEV_QUIESCE:
2062		switch (oldstate) {
2063		case SDEV_RUNNING:
2064		case SDEV_OFFLINE:
2065			break;
2066		default:
2067			goto illegal;
2068		}
2069		break;
2070
2071	case SDEV_OFFLINE:
2072		switch (oldstate) {
2073		case SDEV_CREATED:
2074		case SDEV_RUNNING:
2075		case SDEV_QUIESCE:
2076		case SDEV_BLOCK:
2077			break;
2078		default:
2079			goto illegal;
2080		}
2081		break;
2082
2083	case SDEV_BLOCK:
2084		switch (oldstate) {
2085		case SDEV_RUNNING:
2086		case SDEV_CREATED_BLOCK:
2087			break;
2088		default:
2089			goto illegal;
2090		}
2091		break;
2092
2093	case SDEV_CREATED_BLOCK:
2094		switch (oldstate) {
2095		case SDEV_CREATED:
2096			break;
2097		default:
2098			goto illegal;
2099		}
2100		break;
2101
2102	case SDEV_CANCEL:
2103		switch (oldstate) {
2104		case SDEV_CREATED:
2105		case SDEV_RUNNING:
2106		case SDEV_QUIESCE:
2107		case SDEV_OFFLINE:
2108		case SDEV_BLOCK:
2109			break;
2110		default:
2111			goto illegal;
2112		}
2113		break;
2114
2115	case SDEV_DEL:
2116		switch (oldstate) {
2117		case SDEV_CREATED:
2118		case SDEV_RUNNING:
2119		case SDEV_OFFLINE:
2120		case SDEV_CANCEL:
2121			break;
2122		default:
2123			goto illegal;
2124		}
2125		break;
2126
2127	}
2128	sdev->sdev_state = state;
2129	return 0;
2130
2131 illegal:
2132	SCSI_LOG_ERROR_RECOVERY(1,
2133				sdev_printk(KERN_ERR, sdev,
2134					    "Illegal state transition %s->%s\n",
2135					    scsi_device_state_name(oldstate),
2136					    scsi_device_state_name(state))
2137				);
2138	return -EINVAL;
2139}
2140EXPORT_SYMBOL(scsi_device_set_state);
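
/*
 * Editor's note -- usage sketch, not part of scsi_lib.c.  Callers
 * must be prepared for the transition to be rejected, e.g. when
 * forcing a device offline:
 *
 *	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
 *		sdev_printk(KERN_WARNING, sdev,
 *			    "device refused offline transition\n");
 */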
2141
2142/**
2143 * 	scsi_evt_emit - emit a single SCSI device uevent
2144 *	@sdev: associated SCSI device
2145 *	@evt: event to emit
2146 *
2147 *	Send a single uevent (scsi_event) to the associated scsi_device.
2148 */
2149static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2150{
2151	int idx = 0;
2152	char *envp[3];
2153
2154	switch (evt->evt_type) {
2155	case SDEV_EVT_MEDIA_CHANGE:
2156		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2157		break;
2158
2159	default:
2160		/* do nothing */
2161		break;
2162	}
2163
2164	envp[idx++] = NULL;
2165
2166	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2167}
2168
2169/**
2170 * 	scsi_evt_thread - send a uevent for each queued scsi event
2171 *	@work: work struct for scsi_device
2172 *
2173 *	Dispatch queued events to their associated scsi_device kobjects
2174 *	as uevents.
2175 */
2176void scsi_evt_thread(struct work_struct *work)
2177{
2178	struct scsi_device *sdev;
2179	LIST_HEAD(event_list);
2180
2181	sdev = container_of(work, struct scsi_device, event_work);
2182
2183	while (1) {
2184		struct scsi_event *evt;
2185		struct list_head *this, *tmp;
2186		unsigned long flags;
2187
2188		spin_lock_irqsave(&sdev->list_lock, flags);
2189		list_splice_init(&sdev->event_list, &event_list);
2190		spin_unlock_irqrestore(&sdev->list_lock, flags);
2191
2192		if (list_empty(&event_list))
2193			break;
2194
2195		list_for_each_safe(this, tmp, &event_list) {
2196			evt = list_entry(this, struct scsi_event, node);
2197			list_del(&evt->node);
2198			scsi_evt_emit(sdev, evt);
2199			kfree(evt);
2200		}
2201	}
2202}
2203
2204/**
2205 * 	sdev_evt_send - send asserted event to uevent thread
2206 *	@sdev: scsi_device event occurred on
2207 *	@evt: event to send
2208 *
2209 *	Assert scsi device event asynchronously.
2210 */
2211void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2212{
2213	unsigned long flags;
2214
2215#if 0
2216	/* FIXME: currently this check eliminates all media change events
2217	 * for polled devices.  Need to update to discriminate between AN
2218	 * and polled events */
2219	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2220		kfree(evt);
2221		return;
2222	}
2223#endif
2224
2225	spin_lock_irqsave(&sdev->list_lock, flags);
2226	list_add_tail(&evt->node, &sdev->event_list);
2227	schedule_work(&sdev->event_work);
2228	spin_unlock_irqrestore(&sdev->list_lock, flags);
2229}
2230EXPORT_SYMBOL_GPL(sdev_evt_send);
2231
2232/**
2233 * 	sdev_evt_alloc - allocate a new scsi event
2234 *	@evt_type: type of event to allocate
2235 *	@gfpflags: GFP flags for allocation
2236 *
2237 *	Allocates and returns a new scsi_event.
2238 */
2239struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2240				  gfp_t gfpflags)
2241{
2242	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2243	if (!evt)
2244		return NULL;
2245
2246	evt->evt_type = evt_type;
2247	INIT_LIST_HEAD(&evt->node);
2248
2249	/* evt_type-specific initialization, if any */
2250	switch (evt_type) {
2251	case SDEV_EVT_MEDIA_CHANGE:
2252	default:
2253		/* do nothing */
2254		break;
2255	}
2256
2257	return evt;
2258}
2259EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2260
2261/**
2262 * 	sdev_evt_send_simple - send asserted event to uevent thread
2263 *	@sdev: scsi_device event occurred on
2264 *	@evt_type: type of event to send
2265 *	@gfpflags: GFP flags for allocation
2266 *
2267 *	Assert scsi device event asynchronously, given an event type.
2268 */
2269void sdev_evt_send_simple(struct scsi_device *sdev,
2270			  enum scsi_device_event evt_type, gfp_t gfpflags)
2271{
2272	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2273	if (!evt) {
2274		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2275			    evt_type);
2276		return;
2277	}
2278
2279	sdev_evt_send(sdev, evt);
2280}
2281EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
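
/*
 * Editor's note -- usage sketch, not part of scsi_lib.c.  An LLD (or
 * the async event handler) asserting a media change; GFP_ATOMIC is
 * used because this may run in completion context:
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *
 * When allocation and submission must be split, the longhand form is:
 *
 *	struct scsi_event *evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE,
 *						GFP_KERNEL);
 *	if (evt)
 *		sdev_evt_send(sdev, evt);
 */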
2282
2283/**
2284 *	scsi_device_quiesce - Block user issued commands.
2285 *	@sdev:	scsi device to quiesce.
2286 *
2287 *	This works by trying to transition to the SDEV_QUIESCE state
2288 *	(which must be a legal transition).  When the device is in this
2289 *	state, only special requests will be accepted, all others will
2290 *	be deferred.  Since special requests may also be requeued requests,
2291 *	a successful return doesn't guarantee the device will be
2292 *	totally quiescent.
2293 *
2294 *	Must be called with user context, may sleep.
2295 *
2296 *	Returns zero if successful, or an error if not.
2297 */
2298int
2299scsi_device_quiesce(struct scsi_device *sdev)
2300{
2301	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2302	if (err)
2303		return err;
2304
2305	scsi_run_queue(sdev->request_queue);
2306	while (sdev->device_busy) {
2307		msleep_interruptible(200);
2308		scsi_run_queue(sdev->request_queue);
2309	}
2310	return 0;
2311}
2312EXPORT_SYMBOL(scsi_device_quiesce);
2313
2314/**
2315 *	scsi_device_resume - Restart user issued commands to a quiesced device.
2316 *	@sdev:	scsi device to resume.
2317 *
2318 *	Moves the device from quiesced back to running and restarts the
2319 *	queues.
2320 *
2321 *	Must be called with user context, may sleep.
2322 */
2323void
2324scsi_device_resume(struct scsi_device *sdev)
2325{
2326	if (scsi_device_set_state(sdev, SDEV_RUNNING))
2327		return;
2328	scsi_run_queue(sdev->request_queue);
2329}
2330EXPORT_SYMBOL(scsi_device_resume);
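
/*
 * Editor's note -- usage sketch, not part of scsi_lib.c.  The usual
 * bracket around maintenance I/O (cf. the SPI domain validation
 * code); issue_internal_commands() is a hypothetical helper:
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		issue_internal_commands(sdev);	(special requests only)
 *		scsi_device_resume(sdev);
 *	}
 */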
2331
2332static void
2333device_quiesce_fn(struct scsi_device *sdev, void *data)
2334{
2335	scsi_device_quiesce(sdev);
2336}
2337
2338void
2339scsi_target_quiesce(struct scsi_target *starget)
2340{
2341	starget_for_each_device(starget, NULL, device_quiesce_fn);
2342}
2343EXPORT_SYMBOL(scsi_target_quiesce);
2344
2345static void
2346device_resume_fn(struct scsi_device *sdev, void *data)
2347{
2348	scsi_device_resume(sdev);
2349}
2350
2351void
2352scsi_target_resume(struct scsi_target *starget)
2353{
2354	starget_for_each_device(starget, NULL, device_resume_fn);
2355}
2356EXPORT_SYMBOL(scsi_target_resume);
2357
2358/**
2359 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2360 * @sdev:	device to block
2361 *
2362 * Block request made by SCSI LLDs to temporarily stop all
2363 * SCSI commands on the specified device.  May be called from
2364 * interrupt or normal process context.
2365 *
2366 * Returns zero if successful, or an error if not.
2367 *
2368 * Notes:
2369 *	This routine transitions the device to the SDEV_BLOCK state
2370 *	(which must be a legal transition).  When the device is in this
2371 *	state, all commands are deferred until the SCSI LLD reenables the
2372 *	device with scsi_internal_device_unblock() or device_block_tmo
2373 *	fires.  This routine assumes the host_lock is held on entry.
2374 */
2375int
2376scsi_internal_device_block(struct scsi_device *sdev)
2377{
2378	struct request_queue *q = sdev->request_queue;
2379	unsigned long flags;
2380	int err = 0;
2381
2382	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2383	if (err) {
2384		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2385
2386		if (err)
2387			return err;
2388	}
2389
2390	/*
2391	 * The device has transitioned to SDEV_BLOCK.  Stop the
2392	 * block layer from calling the midlayer with this device's
2393	 * request queue.
2394	 */
2395	spin_lock_irqsave(q->queue_lock, flags);
2396	blk_stop_queue(q);
2397	spin_unlock_irqrestore(q->queue_lock, flags);
2398
2399	return 0;
2400}
2401EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2402
2403/**
2404 * scsi_internal_device_unblock - resume a device after a block request
2405 * @sdev:	device to resume
2406 *
2407 * Called by SCSI LLDs or the midlayer to restart the device queue
2408 * of a previously blocked SCSI device.  May be called from interrupt
2409 * or normal process context.
2410 *
2411 * Returns zero if successful or error if not.
2412 *
2413 * Notes:
2414 *	This routine transitions the device to the SDEV_RUNNING state
2415 *	(which must be a legal transition) allowing the midlayer to
2416 *	goose the queue for this device.  This routine assumes the
2417 *	host_lock is held upon entry.
2418 */
2419int
2420scsi_internal_device_unblock(struct scsi_device *sdev)
2421{
2422	struct request_queue *q = sdev->request_queue;
2423	unsigned long flags;
2424
2425	/*
2426	 * Try to transition the scsi device to SDEV_RUNNING
2427	 * and goose the device queue if successful.
2428	 */
2429	if (sdev->sdev_state == SDEV_BLOCK)
2430		sdev->sdev_state = SDEV_RUNNING;
2431	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2432		sdev->sdev_state = SDEV_CREATED;
2433	else
2434		return -EINVAL;
2435
2436	spin_lock_irqsave(q->queue_lock, flags);
2437	blk_start_queue(q);
2438	spin_unlock_irqrestore(q->queue_lock, flags);
2439
2440	return 0;
2441}
2442EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
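
/*
 * Editor's note -- usage sketch, not part of scsi_lib.c.  An LLD
 * pausing one device across a controller reset.  Both routines
 * document that the host_lock is held on entry; shost and flags are
 * illustrative names.
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_block(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *
 *	... reset the hardware ...
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_unblock(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */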
2443
2444static void
2445device_block(struct scsi_device *sdev, void *data)
2446{
2447	scsi_internal_device_block(sdev);
2448}
2449
2450static int
2451target_block(struct device *dev, void *data)
2452{
2453	if (scsi_is_target_device(dev))
2454		starget_for_each_device(to_scsi_target(dev), NULL,
2455					device_block);
2456	return 0;
2457}
2458
2459void
2460scsi_target_block(struct device *dev)
2461{
2462	if (scsi_is_target_device(dev))
2463		starget_for_each_device(to_scsi_target(dev), NULL,
2464					device_block);
2465	else
2466		device_for_each_child(dev, NULL, target_block);
2467}
2468EXPORT_SYMBOL_GPL(scsi_target_block);
2469
2470static void
2471device_unblock(struct scsi_device *sdev, void *data)
2472{
2473	scsi_internal_device_unblock(sdev);
2474}
2475
2476static int
2477target_unblock(struct device *dev, void *data)
2478{
2479	if (scsi_is_target_device(dev))
2480		starget_for_each_device(to_scsi_target(dev), NULL,
2481					device_unblock);
2482	return 0;
2483}
2484
2485void
2486scsi_target_unblock(struct device *dev)
2487{
2488	if (scsi_is_target_device(dev))
2489		starget_for_each_device(to_scsi_target(dev), NULL,
2490					device_unblock);
2491	else
2492		device_for_each_child(dev, NULL, target_unblock);
2493}
2494EXPORT_SYMBOL_GPL(scsi_target_unblock);
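
/*
 * Editor's note -- usage sketch, not part of scsi_lib.c.  Transport
 * classes use the target-wide variants when a remote port drops off
 * the fabric and later returns (cf. the FC transport's rport
 * handling; rport is an illustrative name):
 *
 *	scsi_target_block(&rport->dev);
 *	... wait for the port to reappear or for dev_loss_tmo ...
 *	scsi_target_unblock(&rport->dev);
 */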
2495
2496/**
2497 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2498 * @sgl:	scatter-gather list
2499 * @sg_count:	number of segments in sg
2500 * @offset:	offset in bytes into sg, on return offset into the mapped area
2501 * @len:	bytes to map, on return number of bytes mapped
2502 *
2503 * Returns virtual address of the start of the mapped page; add the
 * returned @offset to address the requested byte
2504 */
2505void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2506			  size_t *offset, size_t *len)
2507{
2508	int i;
2509	size_t sg_len = 0, len_complete = 0;
2510	struct scatterlist *sg;
2511	struct page *page;
2512
2513	WARN_ON(!irqs_disabled());
2514
2515	for_each_sg(sgl, sg, sg_count, i) {
2516		len_complete = sg_len; /* Complete sg-entries */
2517		sg_len += sg->length;
2518		if (sg_len > *offset)
2519			break;
2520	}
2521
2522	if (unlikely(i == sg_count)) {
2523		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2524			"elements %d\n",
2525		       __func__, sg_len, *offset, sg_count);
2526		WARN_ON(1);
2527		return NULL;
2528	}
2529
2530	/* Offset starting from the beginning of first page in this sg-entry */
2531	*offset = *offset - len_complete + sg->offset;
2532
2533	/* Assumption: contiguous pages can be accessed as "page + i" */
2534	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2535	*offset &= ~PAGE_MASK;
2536
2537	/* Bytes in this sg-entry from *offset to the end of the page */
2538	sg_len = PAGE_SIZE - *offset;
2539	if (*len > sg_len)
2540		*len = sg_len;
2541
2542	return kmap_atomic(page, KM_BIO_SRC_IRQ);
2543}
2544EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2545
2546/**
2547 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2548 * @virt:	virtual address to be unmapped
2549 */
2550void scsi_kunmap_atomic_sg(void *virt)
2551{
2552	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2553}
2554EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
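
/*
 * Editor's note -- usage sketch, not part of scsi_lib.c.  Copying
 * bytes out of a command's scatterlist from atomic context (IRQs
 * disabled, as the WARN_ON above requires).  Only one page is mapped
 * per call, so a full copy would loop; one step is shown.  dst and
 * count are illustrative.
 *
 *	size_t offset = 0, len = count;
 *	void *vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd),
 *					  scsi_sg_count(cmd),
 *					  &offset, &len);
 *	if (vaddr) {
 *		memcpy(dst, vaddr + offset, len);	(len may be < count)
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 */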
2555