scsi_lib.c revision da6c5c720c52cc717124f8f0830b710ea6a092fd
1/*
2 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3 *
4 *  SCSI queueing library.
5 *      Initial versions: Eric Youngdale (eric@andante.org).
6 *                        Based upon conversations with large numbers
7 *                        of people at Linux Expo.
8 */
9
10#include <linux/bio.h>
11#include <linux/bitops.h>
12#include <linux/blkdev.h>
13#include <linux/completion.h>
14#include <linux/kernel.h>
15#include <linux/mempool.h>
16#include <linux/slab.h>
17#include <linux/init.h>
18#include <linux/pci.h>
19#include <linux/delay.h>
20#include <linux/hardirq.h>
21#include <linux/scatterlist.h>
22
23#include <scsi/scsi.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_dbg.h>
26#include <scsi/scsi_device.h>
27#include <scsi/scsi_driver.h>
28#include <scsi/scsi_eh.h>
29#include <scsi/scsi_host.h>
30
31#include "scsi_priv.h"
32#include "scsi_logging.h"
33
34
35#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
36#define SG_MEMPOOL_SIZE		2
37
38struct scsi_host_sg_pool {
39	size_t		size;
40	char		*name;
41	struct kmem_cache	*slab;
42	mempool_t	*pool;
43};
44
45#define SP(x) { x, "sgpool-" __stringify(x) }
46#if (SCSI_MAX_SG_SEGMENTS < 32)
47#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
48#endif
49static struct scsi_host_sg_pool scsi_sg_pools[] = {
50	SP(8),
51	SP(16),
52#if (SCSI_MAX_SG_SEGMENTS > 32)
53	SP(32),
54#if (SCSI_MAX_SG_SEGMENTS > 64)
55	SP(64),
56#if (SCSI_MAX_SG_SEGMENTS > 128)
57	SP(128),
58#if (SCSI_MAX_SG_SEGMENTS > 256)
59#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
60#endif
61#endif
62#endif
63#endif
64	SP(SCSI_MAX_SG_SEGMENTS)
65};
66#undef SP
67
68struct kmem_cache *scsi_sdb_cache;
69
70static void scsi_run_queue(struct request_queue *q);
71
72/*
73 * Function:	scsi_unprep_request()
74 *
75 * Purpose:	Remove all preparation done for a request, including its
76 *		associated scsi_cmnd, so that it can be requeued.
77 *
78 * Arguments:	req	- request to unprepare
79 *
80 * Lock status:	Assumed that no locks are held upon entry.
81 *
82 * Returns:	Nothing.
83 */
84static void scsi_unprep_request(struct request *req)
85{
86	struct scsi_cmnd *cmd = req->special;
87
88	req->cmd_flags &= ~REQ_DONTPREP;
89	req->special = NULL;
90
91	scsi_put_command(cmd);
92}
93
94/**
95 * __scsi_queue_insert - private queue insertion
96 * @cmd: The SCSI command being requeued
97 * @reason:  The reason for the requeue
98 * @unbusy: Whether the queue should be unbusied
99 *
100 * This is a private queue insertion.  The public interface
101 * scsi_queue_insert() always assumes the queue should be unbusied
102 * because it's always called before the completion.  This function is
103 * for a requeue after completion, which should only occur in this
104 * file.
105 */
106static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
107{
108	struct Scsi_Host *host = cmd->device->host;
109	struct scsi_device *device = cmd->device;
110	struct scsi_target *starget = scsi_target(device);
111	struct request_queue *q = device->request_queue;
112	unsigned long flags;
113
114	SCSI_LOG_MLQUEUE(1,
115		 printk("Inserting command %p into mlqueue\n", cmd));
116
117	/*
118	 * Set the appropriate busy bit for the device/host.
119	 *
120	 * If the host/device isn't busy, assume that something actually
121	 * completed, and that we should be able to queue a command now.
122	 *
123	 * Note that the prior mid-layer assumption that any host could
124	 * always queue at least one command is now broken.  The mid-layer
125	 * will implement a user specifiable stall (see
126	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
127	 * if a command is requeued with no other commands outstanding
128	 * either for the device or for the host.
129	 */
130	switch (reason) {
131	case SCSI_MLQUEUE_HOST_BUSY:
132		host->host_blocked = host->max_host_blocked;
133		break;
134	case SCSI_MLQUEUE_DEVICE_BUSY:
135		device->device_blocked = device->max_device_blocked;
136		break;
137	case SCSI_MLQUEUE_TARGET_BUSY:
138		starget->target_blocked = starget->max_target_blocked;
139		break;
140	}
141
142	/*
143	 * Decrement the counters, since these commands are no longer
144	 * active on the host/device.
145	 */
146	if (unbusy)
147		scsi_device_unbusy(device);
148
149	/*
150	 * Requeue this command.  It will go before all other commands
151	 * that are already in the queue.
152	 *
153	 * NOTE: there is magic here about the way the queue is plugged if
154	 * we have no outstanding commands.
155	 *
156	 * Although we *don't* plug the queue, we call the request
157	 * function.  The SCSI request function detects the blocked condition
158	 * and plugs the queue appropriately.
159         */
160	spin_lock_irqsave(q->queue_lock, flags);
161	blk_requeue_request(q, cmd->request);
162	spin_unlock_irqrestore(q->queue_lock, flags);
163
164	scsi_run_queue(q);
165
166	return 0;
167}
168
169/*
170 * Function:    scsi_queue_insert()
171 *
172 * Purpose:     Insert a command in the midlevel queue.
173 *
174 * Arguments:   cmd    - command that we are adding to queue.
175 *              reason - why we are inserting command to queue.
176 *
177 * Lock status: Assumed that lock is not held upon entry.
178 *
179 * Returns:     Nothing.
180 *
181 * Notes:       We do this for one of two cases.  Either the host is busy
182 *              and it cannot accept any more commands for the time being,
183 *              or the device returned QUEUE_FULL and can accept no more
184 *              commands.
185 * Notes:       This could be called either from an interrupt context or a
186 *              normal process context.
187 */
188int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
189{
190	return __scsi_queue_insert(cmd, reason, 1);
191}
192/**
193 * scsi_execute - insert request and wait for the result
194 * @sdev:	scsi device
195 * @cmd:	scsi command
196 * @data_direction: data direction
197 * @buffer:	data buffer
198 * @bufflen:	length of the data buffer in bytes
199 * @sense:	optional sense buffer
200 * @timeout:	request timeout in jiffies
201 * @retries:	number of times to retry request
202 * @flags:	flags to OR into the request's cmd_flags
203 * @resid:	optional residual length
204 *
205 * returns the req->errors value which is the scsi_cmnd result
206 * field.
207 */
208int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
209		 int data_direction, void *buffer, unsigned bufflen,
210		 unsigned char *sense, int timeout, int retries, int flags,
211		 int *resid)
212{
213	struct request *req;
214	int write = (data_direction == DMA_TO_DEVICE);
215	int ret = DRIVER_ERROR << 24;
216
217	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
218
219	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
220					buffer, bufflen, __GFP_WAIT))
221		goto out;
222
223	req->cmd_len = COMMAND_SIZE(cmd[0]);
224	memcpy(req->cmd, cmd, req->cmd_len);
225	req->sense = sense;
226	req->sense_len = 0;
227	req->retries = retries;
228	req->timeout = timeout;
229	req->cmd_type = REQ_TYPE_BLOCK_PC;
230	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
231
232	/*
233	 * head injection *required* here otherwise quiesce won't work
234	 */
235	blk_execute_rq(req->q, NULL, req, 1);
236
237	/*
238	 * Some devices (USB mass-storage in particular) may transfer
239	 * garbage data together with a residue indicating that the data
240	 * is invalid.  Prevent the garbage from being misinterpreted
241	 * and prevent security leaks by zeroing out the excess data.
242	 */
243	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245
246	if (resid)
247		*resid = req->resid_len;
248	ret = req->errors;
249 out:
250	blk_put_request(req);
251
252	return ret;
253}
254EXPORT_SYMBOL(scsi_execute);
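
/*
 * Example sketch (illustrative only, not code from this revision): how a
 * caller might issue a simple INQUIRY through scsi_execute().  The 36-byte
 * allocation length, timeout and retry count are assumptions chosen for the
 * example, and buf is assumed to be a DMA-able buffer of at least 36 bytes.
 * scsi_execute() returns req->errors, i.e. the scsi_cmnd result field, so
 * zero means success.
 */
static int example_inquiry(struct scsi_device *sdev, unsigned char *buf)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	int resid;

	return scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, 36, sense,
			    10 * HZ, 3, 0, &resid);
}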
255
256
257int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
258		     int data_direction, void *buffer, unsigned bufflen,
259		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
260		     int *resid)
261{
262	char *sense = NULL;
263	int result;
264
265	if (sshdr) {
266		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
267		if (!sense)
268			return DRIVER_ERROR << 24;
269	}
270	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
271			      sense, timeout, retries, 0, resid);
272	if (sshdr)
273		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
274
275	kfree(sense);
276	return result;
277}
278EXPORT_SYMBOL(scsi_execute_req);
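
/*
 * Example sketch (illustrative only, not code from this revision): READ
 * CAPACITY via scsi_execute_req(), letting the helper allocate and normalize
 * the sense data into sshdr.  The 8-byte buffer is assumed to be DMA-able
 * (e.g. kmalloc'd); the timeout and retry values are assumptions for the
 * example.
 */
static int example_read_capacity(struct scsi_device *sdev, unsigned char *buf)
{
	unsigned char cmd[10] = { READ_CAPACITY, };
	struct scsi_sense_hdr sshdr;

	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 8, &sshdr,
				30 * HZ, 3, NULL);
}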
279
280/*
281 * Function:    scsi_init_cmd_errh()
282 *
283 * Purpose:     Initialize cmd fields related to error handling.
284 *
285 * Arguments:   cmd	- command that is ready to be queued.
286 *
287 * Notes:       This function has the job of initializing a number of
288 *              fields related to error handling.   Typically this will
289 *              be called once for each command, as required.
290 */
291static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
292{
293	cmd->serial_number = 0;
294	scsi_set_resid(cmd, 0);
295	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
296	if (cmd->cmd_len == 0)
297		cmd->cmd_len = scsi_command_size(cmd->cmnd);
298}
299
300void scsi_device_unbusy(struct scsi_device *sdev)
301{
302	struct Scsi_Host *shost = sdev->host;
303	struct scsi_target *starget = scsi_target(sdev);
304	unsigned long flags;
305
306	spin_lock_irqsave(shost->host_lock, flags);
307	shost->host_busy--;
308	starget->target_busy--;
309	if (unlikely(scsi_host_in_recovery(shost) &&
310		     (shost->host_failed || shost->host_eh_scheduled)))
311		scsi_eh_wakeup(shost);
312	spin_unlock(shost->host_lock);
313	spin_lock(sdev->request_queue->queue_lock);
314	sdev->device_busy--;
315	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
316}
317
318/*
319 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
320 * and call blk_run_queue for all the scsi_devices on the target -
321 * including current_sdev first.
322 *
323 * Called with *no* scsi locks held.
324 */
325static void scsi_single_lun_run(struct scsi_device *current_sdev)
326{
327	struct Scsi_Host *shost = current_sdev->host;
328	struct scsi_device *sdev, *tmp;
329	struct scsi_target *starget = scsi_target(current_sdev);
330	unsigned long flags;
331
332	spin_lock_irqsave(shost->host_lock, flags);
333	starget->starget_sdev_user = NULL;
334	spin_unlock_irqrestore(shost->host_lock, flags);
335
336	/*
337	 * Call blk_run_queue for all LUNs on the target, starting with
338	 * current_sdev. We race with others (to set starget_sdev_user),
339	 * but in most cases, we will be first. Ideally, each LU on the
340	 * target would get some limited time or requests on the target.
341	 */
342	blk_run_queue(current_sdev->request_queue);
343
344	spin_lock_irqsave(shost->host_lock, flags);
345	if (starget->starget_sdev_user)
346		goto out;
347	list_for_each_entry_safe(sdev, tmp, &starget->devices,
348			same_target_siblings) {
349		if (sdev == current_sdev)
350			continue;
351		if (scsi_device_get(sdev))
352			continue;
353
354		spin_unlock_irqrestore(shost->host_lock, flags);
355		blk_run_queue(sdev->request_queue);
356		spin_lock_irqsave(shost->host_lock, flags);
357
358		scsi_device_put(sdev);
359	}
360 out:
361	spin_unlock_irqrestore(shost->host_lock, flags);
362}
363
364static inline int scsi_device_is_busy(struct scsi_device *sdev)
365{
366	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
367		return 1;
368
369	return 0;
370}
371
372static inline int scsi_target_is_busy(struct scsi_target *starget)
373{
374	return ((starget->can_queue > 0 &&
375		 starget->target_busy >= starget->can_queue) ||
376		 starget->target_blocked);
377}
378
379static inline int scsi_host_is_busy(struct Scsi_Host *shost)
380{
381	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
382	    shost->host_blocked || shost->host_self_blocked)
383		return 1;
384
385	return 0;
386}
387
388/*
389 * Function:	scsi_run_queue()
390 *
391 * Purpose:	Select a proper request queue to serve next
392 *
393 * Arguments:	q	- last request's queue
394 *
395 * Returns:     Nothing
396 *
397 * Notes:	The previous command was completely finished, start
398 *		a new one if possible.
399 */
400static void scsi_run_queue(struct request_queue *q)
401{
402	struct scsi_device *sdev = q->queuedata;
403	struct Scsi_Host *shost = sdev->host;
404	LIST_HEAD(starved_list);
405	unsigned long flags;
406
407	if (scsi_target(sdev)->single_lun)
408		scsi_single_lun_run(sdev);
409
410	spin_lock_irqsave(shost->host_lock, flags);
411	list_splice_init(&shost->starved_list, &starved_list);
412
413	while (!list_empty(&starved_list)) {
414		int flagset;
415
416		/*
417		 * As long as shost is accepting commands and we have
418		 * starved queues, call blk_run_queue. scsi_request_fn
419		 * drops the queue_lock and can add us back to the
420		 * starved_list.
421		 *
422		 * host_lock protects the starved_list and starved_entry.
423		 * scsi_request_fn must get the host_lock before checking
424		 * or modifying starved_list or starved_entry.
425		 */
426		if (scsi_host_is_busy(shost))
427			break;
428
429		sdev = list_entry(starved_list.next,
430				  struct scsi_device, starved_entry);
431		list_del_init(&sdev->starved_entry);
432		if (scsi_target_is_busy(scsi_target(sdev))) {
433			list_move_tail(&sdev->starved_entry,
434				       &shost->starved_list);
435			continue;
436		}
437
438		spin_unlock(shost->host_lock);
439
440		spin_lock(sdev->request_queue->queue_lock);
441		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
442				!test_bit(QUEUE_FLAG_REENTER,
443					&sdev->request_queue->queue_flags);
444		if (flagset)
445			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
446		__blk_run_queue(sdev->request_queue);
447		if (flagset)
448			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
449		spin_unlock(sdev->request_queue->queue_lock);
450
451		spin_lock(shost->host_lock);
452	}
453	/* put any unprocessed entries back */
454	list_splice(&starved_list, &shost->starved_list);
455	spin_unlock_irqrestore(shost->host_lock, flags);
456
457	blk_run_queue(q);
458}
459
460/*
461 * Function:	scsi_requeue_command()
462 *
463 * Purpose:	Handle post-processing of completed commands.
464 *
465 * Arguments:	q	- queue to operate on
466 *		cmd	- command that may need to be requeued.
467 *
468 * Returns:	Nothing
469 *
470 * Notes:	After command completion, there may be blocks left
471 *		over which weren't finished by the previous command;
472 *		this can be for a number of reasons - the main one is
473 *		I/O errors in the middle of the request, in which case
474 *		we need to request the blocks that come after the bad
475 *		sector.
476 * Notes:	Upon return, cmd is a stale pointer.
477 */
478static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
479{
480	struct request *req = cmd->request;
481	unsigned long flags;
482
483	spin_lock_irqsave(q->queue_lock, flags);
484	scsi_unprep_request(req);
485	blk_requeue_request(q, req);
486	spin_unlock_irqrestore(q->queue_lock, flags);
487
488	scsi_run_queue(q);
489}
490
491void scsi_next_command(struct scsi_cmnd *cmd)
492{
493	struct scsi_device *sdev = cmd->device;
494	struct request_queue *q = sdev->request_queue;
495
496	/* need to hold a reference on the device before we let go of the cmd */
497	get_device(&sdev->sdev_gendev);
498
499	scsi_put_command(cmd);
500	scsi_run_queue(q);
501
502	/* ok to remove device now */
503	put_device(&sdev->sdev_gendev);
504}
505
506void scsi_run_host_queues(struct Scsi_Host *shost)
507{
508	struct scsi_device *sdev;
509
510	shost_for_each_device(sdev, shost)
511		scsi_run_queue(sdev->request_queue);
512}
513
514static void __scsi_release_buffers(struct scsi_cmnd *, int);
515
516/*
517 * Function:    scsi_end_request()
518 *
519 * Purpose:     Post-processing of completed commands (usually invoked at end
520 *		of upper level post-processing and scsi_io_completion).
521 *
522 * Arguments:   cmd	 - command that is complete.
523 *              error    - 0 if I/O indicates success, < 0 for I/O error.
524 *              bytes    - number of bytes of completed I/O
525 *		requeue  - indicates whether we should requeue leftovers.
526 *
527 * Lock status: Assumed that lock is not held upon entry.
528 *
529 * Returns:     cmd if requeue required, NULL otherwise.
530 *
531 * Notes:       This is called for block device requests in order to
532 *              mark some number of sectors as complete.
533 *
534 *		We are guaranteeing that the request queue will be goosed
535 *		at some point during this call.
536 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
537 */
538static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
539					  int bytes, int requeue)
540{
541	struct request_queue *q = cmd->device->request_queue;
542	struct request *req = cmd->request;
543
544	/*
545	 * If there are blocks left over at the end, set up the command
546	 * to queue the remainder of them.
547	 */
548	if (blk_end_request(req, error, bytes)) {
549		/* kill remainder if no retries */
550		if (error && scsi_noretry_cmd(cmd))
551			blk_end_request_all(req, error);
552		else {
553			if (requeue) {
554				/*
555				 * Bleah.  Leftovers again.  Stick the
556				 * leftovers in the front of the
557				 * queue, and goose the queue again.
558				 */
559				scsi_release_buffers(cmd);
560				scsi_requeue_command(q, cmd);
561				cmd = NULL;
562			}
563			return cmd;
564		}
565	}
566
567	/*
568	 * This will goose the queue request function at the end, so we don't
569	 * need to worry about launching another command.
570	 */
571	__scsi_release_buffers(cmd, 0);
572	scsi_next_command(cmd);
573	return NULL;
574}
575
576static inline unsigned int scsi_sgtable_index(unsigned short nents)
577{
578	unsigned int index;
579
580	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
581
582	if (nents <= 8)
583		index = 0;
584	else
585		index = get_count_order(nents) - 3;
586
587	return index;
588}
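
/*
 * Worked example of the index mapping implemented above, assuming the
 * default scsi_sg_pools[] table built earlier in this file: nents 1-8
 * selects index 0 (sgpool-8), 9-16 index 1 (sgpool-16), 17-32 index 2,
 * 33-64 index 3, 65-128 index 4, with the largest allocations landing in
 * the final SCSI_MAX_SG_SEGMENTS pool.
 */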
589
590static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
591{
592	struct scsi_host_sg_pool *sgp;
593
594	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
595	mempool_free(sgl, sgp->pool);
596}
597
598static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
599{
600	struct scsi_host_sg_pool *sgp;
601
602	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
603	return mempool_alloc(sgp->pool, gfp_mask);
604}
605
606static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
607			      gfp_t gfp_mask)
608{
609	int ret;
610
611	BUG_ON(!nents);
612
613	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
614			       gfp_mask, scsi_sg_alloc);
615	if (unlikely(ret))
616		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
617				scsi_sg_free);
618
619	return ret;
620}
621
622static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
623{
624	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
625}
626
627static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
628{
629
630	if (cmd->sdb.table.nents)
631		scsi_free_sgtable(&cmd->sdb);
632
633	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
634
635	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
636		struct scsi_data_buffer *bidi_sdb =
637			cmd->request->next_rq->special;
638		scsi_free_sgtable(bidi_sdb);
639		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
640		cmd->request->next_rq->special = NULL;
641	}
642
643	if (scsi_prot_sg_count(cmd))
644		scsi_free_sgtable(cmd->prot_sdb);
645}
646
647/*
648 * Function:    scsi_release_buffers()
649 *
650 * Purpose:     Completion processing for block device I/O requests.
651 *
652 * Arguments:   cmd	- command that we are bailing.
653 *
654 * Lock status: Assumed that no lock is held upon entry.
655 *
656 * Returns:     Nothing
657 *
658 * Notes:       In the event that an upper level driver rejects a
659 *		command, we must release resources allocated during
660 *		the __init_io() function.  Primarily this would involve
661 *		the scatter-gather table, and potentially any bounce
662 *		buffers.
663 */
664void scsi_release_buffers(struct scsi_cmnd *cmd)
665{
666	__scsi_release_buffers(cmd, 1);
667}
668EXPORT_SYMBOL(scsi_release_buffers);
669
670/*
671 * Function:    scsi_io_completion()
672 *
673 * Purpose:     Completion processing for block device I/O requests.
674 *
675 * Arguments:   cmd   - command that is finished.
676 *
677 * Lock status: Assumed that no lock is held upon entry.
678 *
679 * Returns:     Nothing
680 *
681 * Notes:       This function is matched in terms of capabilities to
682 *              the function that created the scatter-gather list.
683 *              In other words, if there are no bounce buffers
684 *              (the normal case for most drivers), we don't need
685 *              the logic to deal with cleaning up afterwards.
686 *
687 *		We must call scsi_end_request().  This will finish off
688 *		the specified number of sectors.  If we are done, the
689 *		command block will be released and the queue function
690 *		will be goosed.  If we are not done then we have to
691 *		figure out what to do next:
692 *
693 *		a) We can call scsi_requeue_command().  The request
694 *		   will be unprepared and put back on the queue.  Then
695 *		   a new command will be created for it.  This should
696 *		   be used if we made forward progress, or if we want
697 *		   to switch from READ(10) to READ(6) for example.
698 *
699 *		b) We can call scsi_queue_insert().  The request will
700 *		   be put back on the queue and retried using the same
701 *		   command as before, possibly after a delay.
702 *
703 *		c) We can call blk_end_request() with -EIO to fail
704 *		   the remainder of the request.
705 */
706void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
707{
708	int result = cmd->result;
709	struct request_queue *q = cmd->device->request_queue;
710	struct request *req = cmd->request;
711	int error = 0;
712	struct scsi_sense_hdr sshdr;
713	int sense_valid = 0;
714	int sense_deferred = 0;
715	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
716	      ACTION_DELAYED_RETRY} action;
717	char *description = NULL;
718
719	if (result) {
720		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
721		if (sense_valid)
722			sense_deferred = scsi_sense_is_deferred(&sshdr);
723	}
724
725	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
726		req->errors = result;
727		if (result) {
728			if (sense_valid && req->sense) {
729				/*
730				 * SG_IO wants current and deferred errors
731				 */
732				int len = 8 + cmd->sense_buffer[7];
733
734				if (len > SCSI_SENSE_BUFFERSIZE)
735					len = SCSI_SENSE_BUFFERSIZE;
736				memcpy(req->sense, cmd->sense_buffer,  len);
737				req->sense_len = len;
738			}
739			if (!sense_deferred)
740				error = -EIO;
741		}
742
743		req->resid_len = scsi_get_resid(cmd);
744
745		if (scsi_bidi_cmnd(cmd)) {
746			/*
747			 * Bidi commands must be completed as a whole,
748			 * both sides at once.
749			 */
750			req->next_rq->resid_len = scsi_in(cmd)->resid;
751
752			blk_end_request_all(req, 0);
753
754			scsi_release_buffers(cmd);
755			scsi_next_command(cmd);
756			return;
757		}
758	}
759
760	BUG_ON(blk_bidi_rq(req)); /* bidi is not supported for !blk_pc_request yet */
761
762	/*
763	 * Next deal with any sectors which we were able to correctly
764	 * handle.
765	 */
766	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
767				      "%d bytes done.\n",
768				      blk_rq_sectors(req), good_bytes));
769
770	/*
771	 * Recovered errors need reporting, but they're always treated
772	 * as success, so fiddle the result code here.  For BLOCK_PC
773	 * we already took a copy of the original into rq->errors which
774	 * is what gets returned to the user
775	 */
776	if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
777		if (!(req->cmd_flags & REQ_QUIET))
778			scsi_print_sense("", cmd);
779		result = 0;
780		/* BLOCK_PC may have set error */
781		error = 0;
782	}
783
784	/*
785	 * A number of bytes were successfully read.  If there
786	 * are leftovers and there is some kind of error
787	 * (result != 0), retry the rest.
788	 */
789	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
790		return;
791
792	error = -EIO;
793
794	if (host_byte(result) == DID_RESET) {
795		/* Third party bus reset or reset for error recovery
796		 * reasons.  Just retry the command and see what
797		 * happens.
798		 */
799		action = ACTION_RETRY;
800	} else if (sense_valid && !sense_deferred) {
801		switch (sshdr.sense_key) {
802		case UNIT_ATTENTION:
803			if (cmd->device->removable) {
804				/* Detected disc change.  Set a bit
805				 * and quietly refuse further access.
806				 */
807				cmd->device->changed = 1;
808				description = "Media Changed";
809				action = ACTION_FAIL;
810			} else {
811				/* Must have been a power glitch, or a
812				 * bus reset.  Could not have been a
813				 * media change, so we just retry the
814				 * command and see what happens.
815				 */
816				action = ACTION_RETRY;
817			}
818			break;
819		case ILLEGAL_REQUEST:
820			/* If we had an ILLEGAL REQUEST returned, then
821			 * we may have performed an unsupported
822			 * command.  The only thing this should be
823			 * would be a ten byte read where only a six
824			 * byte read was supported.  Also, on a system
825			 * where READ CAPACITY failed, we may have
826			 * read past the end of the disk.
827			 */
828			if ((cmd->device->use_10_for_rw &&
829			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
830			    (cmd->cmnd[0] == READ_10 ||
831			     cmd->cmnd[0] == WRITE_10)) {
832				/* This will issue a new 6-byte command. */
833				cmd->device->use_10_for_rw = 0;
834				action = ACTION_REPREP;
835			} else if (sshdr.asc == 0x10) /* DIX */ {
836				description = "Host Data Integrity Failure";
837				action = ACTION_FAIL;
838				error = -EILSEQ;
839			} else
840				action = ACTION_FAIL;
841			break;
842		case ABORTED_COMMAND:
843			action = ACTION_FAIL;
844			if (sshdr.asc == 0x10) { /* DIF */
845				description = "Target Data Integrity Failure";
846				error = -EILSEQ;
847			}
848			break;
849		case NOT_READY:
850			/* If the device is in the process of becoming
851			 * ready, or has a temporary blockage, retry.
852			 */
853			if (sshdr.asc == 0x04) {
854				switch (sshdr.ascq) {
855				case 0x01: /* becoming ready */
856				case 0x04: /* format in progress */
857				case 0x05: /* rebuild in progress */
858				case 0x06: /* recalculation in progress */
859				case 0x07: /* operation in progress */
860				case 0x08: /* Long write in progress */
861				case 0x09: /* self test in progress */
862					action = ACTION_DELAYED_RETRY;
863					break;
864				default:
865					description = "Device not ready";
866					action = ACTION_FAIL;
867					break;
868				}
869			} else {
870				description = "Device not ready";
871				action = ACTION_FAIL;
872			}
873			break;
874		case VOLUME_OVERFLOW:
875			/* See SSC3rXX or current. */
876			action = ACTION_FAIL;
877			break;
878		default:
879			description = "Unhandled sense code";
880			action = ACTION_FAIL;
881			break;
882		}
883	} else {
884		description = "Unhandled error code";
885		action = ACTION_FAIL;
886	}
887
888	switch (action) {
889	case ACTION_FAIL:
890		/* Give up and fail the remainder of the request */
891		scsi_release_buffers(cmd);
892		if (!(req->cmd_flags & REQ_QUIET)) {
893			if (description)
894				scmd_printk(KERN_INFO, cmd, "%s\n",
895					    description);
896			scsi_print_result(cmd);
897			if (driver_byte(result) & DRIVER_SENSE)
898				scsi_print_sense("", cmd);
899		}
900		if (blk_end_request_err(req, -EIO))
901			scsi_requeue_command(q, cmd);
902		else
903			scsi_next_command(cmd);
904		break;
905	case ACTION_REPREP:
906		/* Unprep the request and put it back at the head of the queue.
907		 * A new command will be prepared and issued.
908		 */
909		scsi_release_buffers(cmd);
910		scsi_requeue_command(q, cmd);
911		break;
912	case ACTION_RETRY:
913		/* Retry the same command immediately */
914		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
915		break;
916	case ACTION_DELAYED_RETRY:
917		/* Retry the same command after a delay */
918		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
919		break;
920	}
921}
922
923static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
924			     gfp_t gfp_mask)
925{
926	int count;
927
928	/*
929	 * If sg table allocation fails, requeue request later.
930	 */
931	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
932					gfp_mask))) {
933		return BLKPREP_DEFER;
934	}
935
936	req->buffer = NULL;
937
938	/*
939	 * Next, walk the list, and fill in the addresses and sizes of
940	 * each segment.
941	 */
942	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
943	BUG_ON(count > sdb->table.nents);
944	sdb->table.nents = count;
945	sdb->length = blk_rq_bytes(req);
946	return BLKPREP_OK;
947}
948
949/*
950 * Function:    scsi_init_io()
951 *
952 * Purpose:     SCSI I/O initialize function.
953 *
954 * Arguments:   cmd   - Command descriptor we wish to initialize
955 *
956 * Returns:     0 on success
957 *		BLKPREP_DEFER if the failure is retryable
958 *		BLKPREP_KILL if the failure is fatal
959 */
960int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
961{
962	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
963	if (error)
964		goto err_exit;
965
966	if (blk_bidi_rq(cmd->request)) {
967		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
968			scsi_sdb_cache, GFP_ATOMIC);
969		if (!bidi_sdb) {
970			error = BLKPREP_DEFER;
971			goto err_exit;
972		}
973
974		cmd->request->next_rq->special = bidi_sdb;
975		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
976								    GFP_ATOMIC);
977		if (error)
978			goto err_exit;
979	}
980
981	if (blk_integrity_rq(cmd->request)) {
982		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
983		int ivecs, count;
984
985		BUG_ON(prot_sdb == NULL);
986		ivecs = blk_rq_count_integrity_sg(cmd->request);
987
988		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
989			error = BLKPREP_DEFER;
990			goto err_exit;
991		}
992
993		count = blk_rq_map_integrity_sg(cmd->request,
994						prot_sdb->table.sgl);
995		BUG_ON(unlikely(count > ivecs));
996
997		cmd->prot_sdb = prot_sdb;
998		cmd->prot_sdb->table.nents = count;
999	}
1000
1001	return BLKPREP_OK ;
1002
1003err_exit:
1004	scsi_release_buffers(cmd);
1005	if (error == BLKPREP_KILL)
1006		scsi_put_command(cmd);
1007	else /* BLKPREP_DEFER */
1008		scsi_unprep_request(cmd->request);
1009
1010	return error;
1011}
1012EXPORT_SYMBOL(scsi_init_io);
1013
1014static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1015		struct request *req)
1016{
1017	struct scsi_cmnd *cmd;
1018
1019	if (!req->special) {
1020		cmd = scsi_get_command(sdev, GFP_ATOMIC);
1021		if (unlikely(!cmd))
1022			return NULL;
1023		req->special = cmd;
1024	} else {
1025		cmd = req->special;
1026	}
1027
1028	/* pull a tag out of the request if we have one */
1029	cmd->tag = req->tag;
1030	cmd->request = req;
1031
1032	cmd->cmnd = req->cmd;
1033
1034	return cmd;
1035}
1036
1037int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1038{
1039	struct scsi_cmnd *cmd;
1040	int ret = scsi_prep_state_check(sdev, req);
1041
1042	if (ret != BLKPREP_OK)
1043		return ret;
1044
1045	cmd = scsi_get_cmd_from_req(sdev, req);
1046	if (unlikely(!cmd))
1047		return BLKPREP_DEFER;
1048
1049	/*
1050	 * BLOCK_PC requests may transfer data, in which case they must have
1051	 * a bio attached to them.  Or they might contain a SCSI command
1052	 * that does not transfer data, in which case they may optionally
1053	 * submit a request without an attached bio.
1054	 */
1055	if (req->bio) {
1056		int ret;
1057
1058		BUG_ON(!req->nr_phys_segments);
1059
1060		ret = scsi_init_io(cmd, GFP_ATOMIC);
1061		if (unlikely(ret))
1062			return ret;
1063	} else {
1064		BUG_ON(blk_rq_bytes(req));
1065
1066		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1067		req->buffer = NULL;
1068	}
1069
1070	cmd->cmd_len = req->cmd_len;
1071	if (!blk_rq_bytes(req))
1072		cmd->sc_data_direction = DMA_NONE;
1073	else if (rq_data_dir(req) == WRITE)
1074		cmd->sc_data_direction = DMA_TO_DEVICE;
1075	else
1076		cmd->sc_data_direction = DMA_FROM_DEVICE;
1077
1078	cmd->transfersize = blk_rq_bytes(req);
1079	cmd->allowed = req->retries;
1080	return BLKPREP_OK;
1081}
1082EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1083
1084/*
1085 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
1086 * from filesystems that still need to be translated to SCSI CDBs by
1087 * the ULD.
1088 */
1089int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1090{
1091	struct scsi_cmnd *cmd;
1092	int ret = scsi_prep_state_check(sdev, req);
1093
1094	if (ret != BLKPREP_OK)
1095		return ret;
1096
1097	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1098			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1099		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1100		if (ret != BLKPREP_OK)
1101			return ret;
1102	}
1103
1104	/*
1105	 * Filesystem requests must transfer data.
1106	 */
1107	BUG_ON(!req->nr_phys_segments);
1108
1109	cmd = scsi_get_cmd_from_req(sdev, req);
1110	if (unlikely(!cmd))
1111		return BLKPREP_DEFER;
1112
1113	memset(cmd->cmnd, 0, BLK_MAX_CDB);
1114	return scsi_init_io(cmd, GFP_ATOMIC);
1115}
1116EXPORT_SYMBOL(scsi_setup_fs_cmnd);
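
/*
 * Example sketch (illustrative only, not code from this revision): the rough
 * shape of an upper-level driver prep_fn built on scsi_setup_fs_cmnd().
 * The READ(10)/WRITE(10) CDB layout is standard SCSI; the retry count, the
 * assumption of 512-byte logical blocks, and the assumption that the LBA
 * fits in 32 bits are simplifications made for the example.
 */
static int example_uld_prep_fn(struct scsi_device *sdev, struct request *rq)
{
	struct scsi_cmnd *cmd;
	sector_t lba = blk_rq_pos(rq);
	unsigned int nblocks = blk_rq_sectors(rq);
	int ret;

	/* Allocates the command and builds its scatter-gather table. */
	ret = scsi_setup_fs_cmnd(sdev, rq);
	if (ret != BLKPREP_OK)
		return ret;

	cmd = rq->special;
	cmd->cmnd[0] = rq_data_dir(rq) == WRITE ? WRITE_10 : READ_10;
	cmd->cmnd[2] = (lba >> 24) & 0xff;
	cmd->cmnd[3] = (lba >> 16) & 0xff;
	cmd->cmnd[4] = (lba >> 8) & 0xff;
	cmd->cmnd[5] = lba & 0xff;
	cmd->cmnd[7] = (nblocks >> 8) & 0xff;
	cmd->cmnd[8] = nblocks & 0xff;
	cmd->cmd_len = 10;
	cmd->sc_data_direction = rq_data_dir(rq) == WRITE ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	cmd->transfersize = sdev->sector_size;
	cmd->allowed = 3;
	return BLKPREP_OK;
}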
1117
1118int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1119{
1120	int ret = BLKPREP_OK;
1121
1122	/*
1123	 * If the device is not in running state we will reject some
1124	 * or all commands.
1125	 */
1126	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1127		switch (sdev->sdev_state) {
1128		case SDEV_OFFLINE:
1129			/*
1130			 * If the device is offline we refuse to process any
1131			 * commands.  The device must be brought online
1132			 * before trying any recovery commands.
1133			 */
1134			sdev_printk(KERN_ERR, sdev,
1135				    "rejecting I/O to offline device\n");
1136			ret = BLKPREP_KILL;
1137			break;
1138		case SDEV_DEL:
1139			/*
1140			 * If the device is fully deleted, we refuse to
1141			 * process any commands as well.
1142			 */
1143			sdev_printk(KERN_ERR, sdev,
1144				    "rejecting I/O to dead device\n");
1145			ret = BLKPREP_KILL;
1146			break;
1147		case SDEV_QUIESCE:
1148		case SDEV_BLOCK:
1149		case SDEV_CREATED_BLOCK:
1150			/*
1151			 * If the device is blocked we defer normal commands.
1152			 */
1153			if (!(req->cmd_flags & REQ_PREEMPT))
1154				ret = BLKPREP_DEFER;
1155			break;
1156		default:
1157			/*
1158			 * For any other not fully online state we only allow
1159			 * special commands.  In particular any user initiated
1160			 * command is not allowed.
1161			 */
1162			if (!(req->cmd_flags & REQ_PREEMPT))
1163				ret = BLKPREP_KILL;
1164			break;
1165		}
1166	}
1167	return ret;
1168}
1169EXPORT_SYMBOL(scsi_prep_state_check);
1170
1171int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1172{
1173	struct scsi_device *sdev = q->queuedata;
1174
1175	switch (ret) {
1176	case BLKPREP_KILL:
1177		req->errors = DID_NO_CONNECT << 16;
1178		/* release the command and kill it */
1179		if (req->special) {
1180			struct scsi_cmnd *cmd = req->special;
1181			scsi_release_buffers(cmd);
1182			scsi_put_command(cmd);
1183			req->special = NULL;
1184		}
1185		break;
1186	case BLKPREP_DEFER:
1187		/*
1188		 * If we defer, blk_peek_request() returns NULL, but the
1189		 * queue must be restarted eventually, so plug it here when no
1190		 * completing command is outstanding to do that for us.
1191		 */
1192		if (sdev->device_busy == 0)
1193			blk_plug_device(q);
1194		break;
1195	default:
1196		req->cmd_flags |= REQ_DONTPREP;
1197	}
1198
1199	return ret;
1200}
1201EXPORT_SYMBOL(scsi_prep_return);
1202
1203int scsi_prep_fn(struct request_queue *q, struct request *req)
1204{
1205	struct scsi_device *sdev = q->queuedata;
1206	int ret = BLKPREP_KILL;
1207
1208	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1209		ret = scsi_setup_blk_pc_cmnd(sdev, req);
1210	return scsi_prep_return(q, req, ret);
1211}
1212EXPORT_SYMBOL(scsi_prep_fn);
1213
1214/*
1215 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1216 * return 0.
1217 *
1218 * Called with the queue_lock held.
1219 */
1220static inline int scsi_dev_queue_ready(struct request_queue *q,
1221				  struct scsi_device *sdev)
1222{
1223	if (sdev->device_busy == 0 && sdev->device_blocked) {
1224		/*
1225		 * unblock after device_blocked iterates to zero
1226		 */
1227		if (--sdev->device_blocked == 0) {
1228			SCSI_LOG_MLQUEUE(3,
1229				   sdev_printk(KERN_INFO, sdev,
1230				   "unblocking device at zero depth\n"));
1231		} else {
1232			blk_plug_device(q);
1233			return 0;
1234		}
1235	}
1236	if (scsi_device_is_busy(sdev))
1237		return 0;
1238
1239	return 1;
1240}
1241
1242
1243/*
1244 * scsi_target_queue_ready: checks whether we can send commands to the target
1245 * @sdev: scsi device on starget to check.
1246 *
1247 * Called with the host lock held.
1248 */
1249static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1250					   struct scsi_device *sdev)
1251{
1252	struct scsi_target *starget = scsi_target(sdev);
1253
1254	if (starget->single_lun) {
1255		if (starget->starget_sdev_user &&
1256		    starget->starget_sdev_user != sdev)
1257			return 0;
1258		starget->starget_sdev_user = sdev;
1259	}
1260
1261	if (starget->target_busy == 0 && starget->target_blocked) {
1262		/*
1263		 * unblock after target_blocked iterates to zero
1264		 */
1265		if (--starget->target_blocked == 0) {
1266			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1267					 "unblocking target at zero depth\n"));
1268		} else
1269			return 0;
1270	}
1271
1272	if (scsi_target_is_busy(starget)) {
1273		if (list_empty(&sdev->starved_entry)) {
1274			list_add_tail(&sdev->starved_entry,
1275				      &shost->starved_list);
1276			return 0;
1277		}
1278	}
1279
1280	/* We're OK to process the command, so we can't be starved */
1281	if (!list_empty(&sdev->starved_entry))
1282		list_del_init(&sdev->starved_entry);
1283	return 1;
1284}
1285
1286/*
1287 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1288 * return 0. We must end up running the queue again whenever 0 is
1289 * returned, else IO can hang.
1290 *
1291 * Called with host_lock held.
1292 */
1293static inline int scsi_host_queue_ready(struct request_queue *q,
1294				   struct Scsi_Host *shost,
1295				   struct scsi_device *sdev)
1296{
1297	if (scsi_host_in_recovery(shost))
1298		return 0;
1299	if (shost->host_busy == 0 && shost->host_blocked) {
1300		/*
1301		 * unblock after host_blocked iterates to zero
1302		 */
1303		if (--shost->host_blocked == 0) {
1304			SCSI_LOG_MLQUEUE(3,
1305				printk("scsi%d unblocking host at zero depth\n",
1306					shost->host_no));
1307		} else {
1308			return 0;
1309		}
1310	}
1311	if (scsi_host_is_busy(shost)) {
1312		if (list_empty(&sdev->starved_entry))
1313			list_add_tail(&sdev->starved_entry, &shost->starved_list);
1314		return 0;
1315	}
1316
1317	/* We're OK to process the command, so we can't be starved */
1318	if (!list_empty(&sdev->starved_entry))
1319		list_del_init(&sdev->starved_entry);
1320
1321	return 1;
1322}
1323
1324/*
1325 * Busy state exporting function for request stacking drivers.
1326 *
1327 * For efficiency, no lock is taken to check the busy state of
1328 * shost/starget/sdev, since the returned value is only a hint and
1329 * may change after request stacking drivers call the function,
1330 * whether or not a lock is taken.
1331 *
1332 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
1333 * (e.g. !sdev), scsi needs to return 'not busy'.
1334 * Otherwise, request stacking drivers may hold requests forever.
1335 */
1336static int scsi_lld_busy(struct request_queue *q)
1337{
1338	struct scsi_device *sdev = q->queuedata;
1339	struct Scsi_Host *shost;
1340	struct scsi_target *starget;
1341
1342	if (!sdev)
1343		return 0;
1344
1345	shost = sdev->host;
1346	starget = scsi_target(sdev);
1347
1348	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1349	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1350		return 1;
1351
1352	return 0;
1353}
1354
1355/*
1356 * Kill a request for a dead device
1357 */
1358static void scsi_kill_request(struct request *req, struct request_queue *q)
1359{
1360	struct scsi_cmnd *cmd = req->special;
1361	struct scsi_device *sdev = cmd->device;
1362	struct scsi_target *starget = scsi_target(sdev);
1363	struct Scsi_Host *shost = sdev->host;
1364
1365	blk_start_request(req);
1366
1367	if (unlikely(cmd == NULL)) {
1368		printk(KERN_CRIT "impossible request in %s.\n",
1369				 __func__);
1370		BUG();
1371	}
1372
1373	scsi_init_cmd_errh(cmd);
1374	cmd->result = DID_NO_CONNECT << 16;
1375	atomic_inc(&cmd->device->iorequest_cnt);
1376
1377	/*
1378	 * The SCSI request completion path will do scsi_device_unbusy() and
1379	 * drop these counts again, so bump them here first.  To bump the
1380	 * counters, we need to dance with the locks as the normal issue path does.
1381	 */
1382	sdev->device_busy++;
1383	spin_unlock(sdev->request_queue->queue_lock);
1384	spin_lock(shost->host_lock);
1385	shost->host_busy++;
1386	starget->target_busy++;
1387	spin_unlock(shost->host_lock);
1388	spin_lock(sdev->request_queue->queue_lock);
1389
1390	blk_complete_request(req);
1391}
1392
1393static void scsi_softirq_done(struct request *rq)
1394{
1395	struct scsi_cmnd *cmd = rq->special;
1396	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1397	int disposition;
1398
1399	INIT_LIST_HEAD(&cmd->eh_entry);
1400
1401	/*
1402	 * Set the serial numbers back to zero
1403	 */
1404	cmd->serial_number = 0;
1405
1406	atomic_inc(&cmd->device->iodone_cnt);
1407	if (cmd->result)
1408		atomic_inc(&cmd->device->ioerr_cnt);
1409
1410	disposition = scsi_decide_disposition(cmd);
1411	if (disposition != SUCCESS &&
1412	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1413		sdev_printk(KERN_ERR, cmd->device,
1414			    "timing out command, waited %lus\n",
1415			    wait_for/HZ);
1416		disposition = SUCCESS;
1417	}
1418
1419	scsi_log_completion(cmd, disposition);
1420
1421	switch (disposition) {
1422		case SUCCESS:
1423			scsi_finish_command(cmd);
1424			break;
1425		case NEEDS_RETRY:
1426			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1427			break;
1428		case ADD_TO_MLQUEUE:
1429			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1430			break;
1431		default:
1432			if (!scsi_eh_scmd_add(cmd, 0))
1433				scsi_finish_command(cmd);
1434	}
1435}
1436
1437/*
1438 * Function:    scsi_request_fn()
1439 *
1440 * Purpose:     Main strategy routine for SCSI.
1441 *
1442 * Arguments:   q       - Pointer to actual queue.
1443 *
1444 * Returns:     Nothing
1445 *
1446 * Lock status: IO request lock assumed to be held when called.
1447 */
1448static void scsi_request_fn(struct request_queue *q)
1449{
1450	struct scsi_device *sdev = q->queuedata;
1451	struct Scsi_Host *shost;
1452	struct scsi_cmnd *cmd;
1453	struct request *req;
1454
1455	if (!sdev) {
1456		printk("scsi: killing requests for dead queue\n");
1457		while ((req = blk_peek_request(q)) != NULL)
1458			scsi_kill_request(req, q);
1459		return;
1460	}
1461
1462	if(!get_device(&sdev->sdev_gendev))
1463		/* We must be tearing the block queue down already */
1464		return;
1465
1466	/*
1467	 * To start with, we keep looping until the queue is empty, or until
1468	 * the host is no longer able to accept any more requests.
1469	 */
1470	shost = sdev->host;
1471	while (!blk_queue_plugged(q)) {
1472		int rtn;
1473		/*
1474		 * get next queueable request.  We do this early to make sure
1475		 * that the request is fully prepared even if we cannot
1476		 * accept it.
1477		 */
1478		req = blk_peek_request(q);
1479		if (!req || !scsi_dev_queue_ready(q, sdev))
1480			break;
1481
1482		if (unlikely(!scsi_device_online(sdev))) {
1483			sdev_printk(KERN_ERR, sdev,
1484				    "rejecting I/O to offline device\n");
1485			scsi_kill_request(req, q);
1486			continue;
1487		}
1488
1489
1490		/*
1491		 * Remove the request from the request list.
1492		 */
1493		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1494			blk_start_request(req);
1495		sdev->device_busy++;
1496
1497		spin_unlock(q->queue_lock);
1498		cmd = req->special;
1499		if (unlikely(cmd == NULL)) {
1500			printk(KERN_CRIT "impossible request in %s.\n"
1501					 "please mail a stack trace to "
1502					 "linux-scsi@vger.kernel.org\n",
1503					 __func__);
1504			blk_dump_rq_flags(req, "foo");
1505			BUG();
1506		}
1507		spin_lock(shost->host_lock);
1508
1509		/*
1510		 * We hit this when the driver is using a host wide
1511		 * tag map. For device level tag maps the queue_depth check
1512		 * in the device ready fn would prevent us from trying
1513		 * to allocate a tag. Since the map is a shared host resource
1514		 * we add the dev to the starved list so it eventually gets
1515		 * a run when a tag is freed.
1516		 */
1517		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1518			if (list_empty(&sdev->starved_entry))
1519				list_add_tail(&sdev->starved_entry,
1520					      &shost->starved_list);
1521			goto not_ready;
1522		}
1523
1524		if (!scsi_target_queue_ready(shost, sdev))
1525			goto not_ready;
1526
1527		if (!scsi_host_queue_ready(q, shost, sdev))
1528			goto not_ready;
1529
1530		scsi_target(sdev)->target_busy++;
1531		shost->host_busy++;
1532
1533		/*
1534		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1535		 *		take the lock again.
1536		 */
1537		spin_unlock_irq(shost->host_lock);
1538
1539		/*
1540		 * Finally, initialize any error handling parameters, and set up
1541		 * the timers for timeouts.
1542		 */
1543		scsi_init_cmd_errh(cmd);
1544
1545		/*
1546		 * Dispatch the command to the low-level driver.
1547		 */
1548		rtn = scsi_dispatch_cmd(cmd);
1549		spin_lock_irq(q->queue_lock);
1550		if(rtn) {
1551			/* we're refusing the command; because of
1552			 * the way locks get dropped, we need to
1553			 * check here if plugging is required */
1554			if(sdev->device_busy == 0)
1555				blk_plug_device(q);
1556
1557			break;
1558		}
1559	}
1560
1561	goto out;
1562
1563 not_ready:
1564	spin_unlock_irq(shost->host_lock);
1565
1566	/*
1567	 * lock q, handle tag, requeue req, and decrement device_busy. We
1568	 * must return with queue_lock held.
1569	 *
1570	 * Decrementing device_busy without checking it is OK, as all such
1571	 * cases (host limits or settings) should run the queue at some
1572	 * later time.
1573	 */
1574	spin_lock_irq(q->queue_lock);
1575	blk_requeue_request(q, req);
1576	sdev->device_busy--;
1577	if(sdev->device_busy == 0)
1578		blk_plug_device(q);
1579 out:
1580	/* must be careful here...if we trigger the ->remove() function
1581	 * we cannot be holding the q lock */
1582	spin_unlock_irq(q->queue_lock);
1583	put_device(&sdev->sdev_gendev);
1584	spin_lock_irq(q->queue_lock);
1585}
1586
1587u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1588{
1589	struct device *host_dev;
1590	u64 bounce_limit = 0xffffffff;
1591
1592	if (shost->unchecked_isa_dma)
1593		return BLK_BOUNCE_ISA;
1594	/*
1595	 * Platforms with virtual-DMA translation
1596	 * hardware have no practical limit.
1597	 */
1598	if (!PCI_DMA_BUS_IS_PHYS)
1599		return BLK_BOUNCE_ANY;
1600
1601	host_dev = scsi_get_device(shost);
1602	if (host_dev && host_dev->dma_mask)
1603		bounce_limit = *host_dev->dma_mask;
1604
1605	return bounce_limit;
1606}
1607EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1608
1609struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1610					 request_fn_proc *request_fn)
1611{
1612	struct request_queue *q;
1613	struct device *dev = shost->shost_gendev.parent;
1614
1615	q = blk_init_queue(request_fn, NULL);
1616	if (!q)
1617		return NULL;
1618
1619	/*
1620	 * this limit is imposed by hardware restrictions
1621	 */
1622	blk_queue_max_hw_segments(q, shost->sg_tablesize);
1623	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
1624
1625	blk_queue_max_sectors(q, shost->max_sectors);
1626	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1627	blk_queue_segment_boundary(q, shost->dma_boundary);
1628	dma_set_seg_boundary(dev, shost->dma_boundary);
1629
1630	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1631
1632	/* New queue, no concurrency on queue_flags */
1633	if (!shost->use_clustering)
1634		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
1635
1636	/*
1637	 * set a reasonable default alignment on word boundaries: the
1638	 * host and device may alter it using
1639	 * blk_queue_update_dma_alignment() later.
1640	 */
1641	blk_queue_dma_alignment(q, 0x03);
1642
1643	return q;
1644}
1645EXPORT_SYMBOL(__scsi_alloc_queue);
1646
1647struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1648{
1649	struct request_queue *q;
1650
1651	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1652	if (!q)
1653		return NULL;
1654
1655	blk_queue_prep_rq(q, scsi_prep_fn);
1656	blk_queue_softirq_done(q, scsi_softirq_done);
1657	blk_queue_rq_timed_out(q, scsi_times_out);
1658	blk_queue_lld_busy(q, scsi_lld_busy);
1659	return q;
1660}
1661
1662void scsi_free_queue(struct request_queue *q)
1663{
1664	blk_cleanup_queue(q);
1665}
1666
1667/*
1668 * Function:    scsi_block_requests()
1669 *
1670 * Purpose:     Utility function used by low-level drivers to prevent further
1671 *		commands from being queued to the device.
1672 *
1673 * Arguments:   shost       - Host in question
1674 *
1675 * Returns:     Nothing
1676 *
1677 * Lock status: No locks are assumed held.
1678 *
1679 * Notes:       There is no timer nor any other means by which the requests
1680 *		get unblocked other than the low-level driver calling
1681 *		scsi_unblock_requests().
1682 */
1683void scsi_block_requests(struct Scsi_Host *shost)
1684{
1685	shost->host_self_blocked = 1;
1686}
1687EXPORT_SYMBOL(scsi_block_requests);
1688
1689/*
1690 * Function:    scsi_unblock_requests()
1691 *
1692 * Purpose:     Utility function used by low-level drivers to allow further
1693 *		commands to be queued to the device.
1694 *
1695 * Arguments:   shost       - Host in question
1696 *
1697 * Returns:     Nothing
1698 *
1699 * Lock status: No locks are assumed held.
1700 *
1701 * Notes:       There is no timer nor any other means by which the requests
1702 *		get unblocked other than the low-level driver calling
1703 *		scsi_unblock_requests().
1704 *
1705 *		This is done as an API function so that changes to the
1706 *		internals of the scsi mid-layer won't require wholesale
1707 *		changes to drivers that use this feature.
1708 */
1709void scsi_unblock_requests(struct Scsi_Host *shost)
1710{
1711	shost->host_self_blocked = 0;
1712	scsi_run_host_queues(shost);
1713}
1714EXPORT_SYMBOL(scsi_unblock_requests);
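
/*
 * Example sketch (illustrative only, not code from this revision): a
 * low-level driver pausing command submission around a hardware reset.
 * The actual reset work is driver specific and only indicated by a comment.
 */
static void example_lld_reset(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);	/* no new commands reach queuecommand */
	/* ... reset and reinitialize the adapter hardware here ... */
	scsi_unblock_requests(shost);	/* resume and re-run the device queues */
}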
1715
1716int __init scsi_init_queue(void)
1717{
1718	int i;
1719
1720	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1721					   sizeof(struct scsi_data_buffer),
1722					   0, 0, NULL);
1723	if (!scsi_sdb_cache) {
1724		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1725		return -ENOMEM;
1726	}
1727
1728	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1729		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1730		int size = sgp->size * sizeof(struct scatterlist);
1731
1732		sgp->slab = kmem_cache_create(sgp->name, size, 0,
1733				SLAB_HWCACHE_ALIGN, NULL);
1734		if (!sgp->slab) {
1735			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1736					sgp->name);
1737			goto cleanup_sdb;
1738		}
1739
1740		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1741						     sgp->slab);
1742		if (!sgp->pool) {
1743			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1744					sgp->name);
1745			goto cleanup_sdb;
1746		}
1747	}
1748
1749	return 0;
1750
1751cleanup_sdb:
1752	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1753		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1754		if (sgp->pool)
1755			mempool_destroy(sgp->pool);
1756		if (sgp->slab)
1757			kmem_cache_destroy(sgp->slab);
1758	}
1759	kmem_cache_destroy(scsi_sdb_cache);
1760
1761	return -ENOMEM;
1762}
1763
1764void scsi_exit_queue(void)
1765{
1766	int i;
1767
1768	kmem_cache_destroy(scsi_sdb_cache);
1769
1770	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1771		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1772		mempool_destroy(sgp->pool);
1773		kmem_cache_destroy(sgp->slab);
1774	}
1775}
1776
1777/**
1778 *	scsi_mode_select - issue a mode select
1779 *	@sdev:	SCSI device to be queried
1780 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
1781 *	@sp:	Save page bit (0 == don't save, 1 == save)
1782 *	@modepage: mode page being requested
1783 *	@buffer: request buffer (may not be smaller than eight bytes)
1784 *	@len:	length of request buffer.
1785 *	@timeout: command timeout
1786 *	@retries: number of retries before failing
1787 *	@data: returns a structure abstracting the mode header data
1788 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
1789 *		must be SCSI_SENSE_BUFFERSIZE big.
1790 *
1791 *	Returns zero if successful; negative error number or scsi
1792 *	status on error
1793 *
1794 */
1795int
1796scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1797		 unsigned char *buffer, int len, int timeout, int retries,
1798		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1799{
1800	unsigned char cmd[10];
1801	unsigned char *real_buffer;
1802	int ret;
1803
1804	memset(cmd, 0, sizeof(cmd));
1805	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1806
1807	if (sdev->use_10_for_ms) {
1808		if (len > 65535)
1809			return -EINVAL;
1810		real_buffer = kmalloc(8 + len, GFP_KERNEL);
1811		if (!real_buffer)
1812			return -ENOMEM;
1813		memcpy(real_buffer + 8, buffer, len);
1814		len += 8;
1815		real_buffer[0] = 0;
1816		real_buffer[1] = 0;
1817		real_buffer[2] = data->medium_type;
1818		real_buffer[3] = data->device_specific;
1819		real_buffer[4] = data->longlba ? 0x01 : 0;
1820		real_buffer[5] = 0;
1821		real_buffer[6] = data->block_descriptor_length >> 8;
1822		real_buffer[7] = data->block_descriptor_length;
1823
1824		cmd[0] = MODE_SELECT_10;
1825		cmd[7] = len >> 8;
1826		cmd[8] = len;
1827	} else {
1828		if (len > 255 || data->block_descriptor_length > 255 ||
1829		    data->longlba)
1830			return -EINVAL;
1831
1832		real_buffer = kmalloc(4 + len, GFP_KERNEL);
1833		if (!real_buffer)
1834			return -ENOMEM;
1835		memcpy(real_buffer + 4, buffer, len);
1836		len += 4;
1837		real_buffer[0] = 0;
1838		real_buffer[1] = data->medium_type;
1839		real_buffer[2] = data->device_specific;
1840		real_buffer[3] = data->block_descriptor_length;
1841
1842
1843		cmd[0] = MODE_SELECT;
1844		cmd[4] = len;
1845	}
1846
1847	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1848			       sshdr, timeout, retries, NULL);
1849	kfree(real_buffer);
1850	return ret;
1851}
1852EXPORT_SYMBOL_GPL(scsi_mode_select);
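
/*
 * Example sketch (illustrative only, not code from this revision): writing a
 * single mode page back with scsi_mode_select().  The page contents in
 * "page" are assumed to have been read and modified beforehand (e.g. by a
 * prior MODE SENSE), and the data header fields are assumed to have been
 * filled in by that earlier call.  Page format (pf=1), save pages (sp=1),
 * timeout and retries are assumptions for the example.
 */
static int example_write_mode_page(struct scsi_device *sdev,
				   unsigned char *page, int page_len,
				   struct scsi_mode_data *data)
{
	struct scsi_sense_hdr sshdr;

	/* Send the page only, with no block descriptors. */
	data->block_descriptor_length = 0;

	return scsi_mode_select(sdev, 1 /* pf */, 1 /* sp */, page[0] & 0x3f,
				page, page_len, 30 * HZ, 3, data, &sshdr);
}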
1853
1854/**
1855 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
1856 *	@sdev:	SCSI device to be queried
1857 *	@dbd:	set if mode sense will allow block descriptors to be returned
1858 *	@modepage: mode page being requested
1859 *	@buffer: request buffer (may not be smaller than eight bytes)
1860 *	@len:	length of request buffer.
1861 *	@timeout: command timeout
1862 *	@retries: number of retries before failing
1863 *	@data: returns a structure abstracting the mode header data
1864 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
1865 *		must be SCSI_SENSE_BUFFERSIZE big.
1866 *
1867 *	Returns zero if successful, or a non-zero result code on failure.
1868 *	On success, the header length (either 4 or 8, depending on whether
1869 *	a six or ten byte command was issued) is returned in data->header_length.
1870 */
1871int
1872scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1873		  unsigned char *buffer, int len, int timeout, int retries,
1874		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1875{
1876	unsigned char cmd[12];
1877	int use_10_for_ms;
1878	int header_length;
1879	int result;
1880	struct scsi_sense_hdr my_sshdr;
1881
1882	memset(data, 0, sizeof(*data));
1883	memset(&cmd[0], 0, 12);
1884	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
1885	cmd[2] = modepage;
1886
1887	/* caller might not be interested in sense, but we need it */
1888	if (!sshdr)
1889		sshdr = &my_sshdr;
1890
1891 retry:
1892	use_10_for_ms = sdev->use_10_for_ms;
1893
1894	if (use_10_for_ms) {
1895		if (len < 8)
1896			len = 8;
1897
1898		cmd[0] = MODE_SENSE_10;
1899		cmd[8] = len;
1900		header_length = 8;
1901	} else {
1902		if (len < 4)
1903			len = 4;
1904
1905		cmd[0] = MODE_SENSE;
1906		cmd[4] = len;
1907		header_length = 4;
1908	}
1909
1910	memset(buffer, 0, len);
1911
1912	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1913				  sshdr, timeout, retries, NULL);
1914
1915	/* This code looks awful: what it's doing is making sure an
1916	 * ILLEGAL REQUEST sense return identifies the actual command
1917	 * byte as the problem.  MODE_SENSE commands can return
1918	 * ILLEGAL REQUEST if the mode page isn't supported. */
1919
1920	if (use_10_for_ms && !scsi_status_is_good(result) &&
1921	    (driver_byte(result) & DRIVER_SENSE)) {
1922		if (scsi_sense_valid(sshdr)) {
1923			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1924			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1925				/*
1926				 * Invalid command operation code
1927				 */
1928				sdev->use_10_for_ms = 0;
1929				goto retry;
1930			}
1931		}
1932	}
1933
1934	if (scsi_status_is_good(result)) {
1935		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1936			     (modepage == 6 || modepage == 8))) {
1937			/* Initio breakage? */
1938			header_length = 0;
1939			data->length = 13;
1940			data->medium_type = 0;
1941			data->device_specific = 0;
1942			data->longlba = 0;
1943			data->block_descriptor_length = 0;
1944		} else if (use_10_for_ms) {
1945			data->length = buffer[0]*256 + buffer[1] + 2;
1946			data->medium_type = buffer[2];
1947			data->device_specific = buffer[3];
1948			data->longlba = buffer[4] & 0x01;
1949			data->block_descriptor_length = buffer[6]*256
1950				+ buffer[7];
1951		} else {
1952			data->length = buffer[0] + 1;
1953			data->medium_type = buffer[1];
1954			data->device_specific = buffer[2];
1955			data->block_descriptor_length = buffer[3];
1956		}
1957		data->header_length = header_length;
1958	}
1959
1960	return result;
1961}
1962EXPORT_SYMBOL(scsi_mode_sense);
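/*
 * Illustrative sketch (not compiled): reading the caching mode page (0x08)
 * and locating the page bytes past the header and block descriptors via the
 * scsi_mode_data filled in above.  The example_* helper, buffer size, and
 * timeout/retry values are invented for illustration only.
 */
#if 0
static int example_read_write_cache_enable(struct scsi_device *sdev)
{
	unsigned char buffer[512];
	struct scsi_mode_data data;
	int res, off;

	res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, NULL);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* the mode page starts after the header and any block descriptors */
	off = data.header_length + data.block_descriptor_length;
	return !!(buffer[off + 2] & 0x04);	/* WCE bit of the caching page */
}
#endif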
1963
1964/**
1965 *	scsi_test_unit_ready - test if unit is ready
1966 *	@sdev:	scsi device to test.
1967 *	@timeout: command timeout
1968 *	@retries: number of retries before failing
1969 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
1970 *		returning sense. Make sure that this is cleared before passing
1971 *		in.
1972 *
1973 *	Returns zero if the device is ready, or a non-zero result if the
1974 *	TUR failed.  For removable media, NOT_READY and UNIT_ATTENTION are
1975 *	translated to success, with the ->changed flag updated.
1976 **/
1977int
1978scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
1979		     struct scsi_sense_hdr *sshdr_external)
1980{
1981	char cmd[] = {
1982		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1983	};
1984	struct scsi_sense_hdr *sshdr;
1985	int result;
1986
1987	if (!sshdr_external)
1988		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1989	else
1990		sshdr = sshdr_external;
1991
1992	/* try to eat the UNIT_ATTENTION if there are enough retries */
1993	do {
1994		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
1995					  timeout, retries, NULL);
1996		if (sdev->removable && scsi_sense_valid(sshdr) &&
1997		    sshdr->sense_key == UNIT_ATTENTION)
1998			sdev->changed = 1;
1999	} while (scsi_sense_valid(sshdr) &&
2000		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2001
2002	if (!sshdr)
2003		/* could not allocate sense buffer, so can't process it */
2004		return result;
2005
2006	if (sdev->removable && scsi_sense_valid(sshdr) &&
2007	    (sshdr->sense_key == UNIT_ATTENTION ||
2008	     sshdr->sense_key == NOT_READY)) {
2009		sdev->changed = 1;
2010		result = 0;
2011	}
2012	if (!sshdr_external)
2013		kfree(sshdr);
2014	return result;
2015}
2016EXPORT_SYMBOL(scsi_test_unit_ready);
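/*
 * Illustrative sketch (not compiled): a removable-media readiness poll built
 * on scsi_test_unit_ready().  The example_* helper and the timeout/retry
 * values are invented; the externally supplied sense header is cleared
 * before use, as required above.
 */
#if 0
static int example_medium_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	memset(&sshdr, 0, sizeof(sshdr));
	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr))
		return 0;		/* not ready */
	return !sdev->changed;		/* ->changed set if the medium may have changed */
}
#endif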
2017
2018/**
2019 *	scsi_device_set_state - Take the given device through the device state model.
2020 *	@sdev:	scsi device to change the state of.
2021 *	@state:	state to change to.
2022 *
2023 *	Returns zero if successful, or -EINVAL if the requested
2024 *	transition is illegal.
2025 */
2026int
2027scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2028{
2029	enum scsi_device_state oldstate = sdev->sdev_state;
2030
2031	if (state == oldstate)
2032		return 0;
2033
2034	switch (state) {
2035	case SDEV_CREATED:
2036		switch (oldstate) {
2037		case SDEV_CREATED_BLOCK:
2038			break;
2039		default:
2040			goto illegal;
2041		}
2042		break;
2043
2044	case SDEV_RUNNING:
2045		switch (oldstate) {
2046		case SDEV_CREATED:
2047		case SDEV_OFFLINE:
2048		case SDEV_QUIESCE:
2049		case SDEV_BLOCK:
2050			break;
2051		default:
2052			goto illegal;
2053		}
2054		break;
2055
2056	case SDEV_QUIESCE:
2057		switch (oldstate) {
2058		case SDEV_RUNNING:
2059		case SDEV_OFFLINE:
2060			break;
2061		default:
2062			goto illegal;
2063		}
2064		break;
2065
2066	case SDEV_OFFLINE:
2067		switch (oldstate) {
2068		case SDEV_CREATED:
2069		case SDEV_RUNNING:
2070		case SDEV_QUIESCE:
2071		case SDEV_BLOCK:
2072			break;
2073		default:
2074			goto illegal;
2075		}
2076		break;
2077
2078	case SDEV_BLOCK:
2079		switch (oldstate) {
2080		case SDEV_RUNNING:
2081		case SDEV_CREATED_BLOCK:
2082			break;
2083		default:
2084			goto illegal;
2085		}
2086		break;
2087
2088	case SDEV_CREATED_BLOCK:
2089		switch (oldstate) {
2090		case SDEV_CREATED:
2091			break;
2092		default:
2093			goto illegal;
2094		}
2095		break;
2096
2097	case SDEV_CANCEL:
2098		switch (oldstate) {
2099		case SDEV_CREATED:
2100		case SDEV_RUNNING:
2101		case SDEV_QUIESCE:
2102		case SDEV_OFFLINE:
2103		case SDEV_BLOCK:
2104			break;
2105		default:
2106			goto illegal;
2107		}
2108		break;
2109
2110	case SDEV_DEL:
2111		switch (oldstate) {
2112		case SDEV_CREATED:
2113		case SDEV_RUNNING:
2114		case SDEV_OFFLINE:
2115		case SDEV_CANCEL:
2116			break;
2117		default:
2118			goto illegal;
2119		}
2120		break;
2121
2122	}
2123	sdev->sdev_state = state;
2124	return 0;
2125
2126 illegal:
2127	SCSI_LOG_ERROR_RECOVERY(1,
2128				sdev_printk(KERN_ERR, sdev,
2129					    "Illegal state transition %s->%s\n",
2130					    scsi_device_state_name(oldstate),
2131					    scsi_device_state_name(state))
2132				);
2133	return -EINVAL;
2134}
2135EXPORT_SYMBOL(scsi_device_set_state);
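/*
 * Illustrative sketch (not compiled): requesting a transition to
 * SDEV_OFFLINE and reporting the -EINVAL returned above when the current
 * state does not permit it.  The example_* helper is invented for
 * illustration only.
 */
#if 0
static void example_take_offline(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "refusing illegal transition to OFFLINE\n");
}
#endif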
2136
2137/**
2138 * 	scsi_evt_emit - emit a single SCSI device uevent
2139 *	@sdev: associated SCSI device
2140 *	@evt: event to emit
2141 *
2142 *	Send a single uevent (scsi_event) to the associated scsi_device.
2143 */
2144static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2145{
2146	int idx = 0;
2147	char *envp[3];
2148
2149	switch (evt->evt_type) {
2150	case SDEV_EVT_MEDIA_CHANGE:
2151		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2152		break;
2153
2154	default:
2155		/* do nothing */
2156		break;
2157	}
2158
2159	envp[idx++] = NULL;
2160
2161	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2162}
2163
2164/**
2165 * 	scsi_evt_thread - send a uevent for each scsi event
2166 *	@work: work struct for scsi_device
2167 *
2168 *	Dispatch queued events to their associated scsi_device kobjects
2169 *	as uevents.
2170 */
2171void scsi_evt_thread(struct work_struct *work)
2172{
2173	struct scsi_device *sdev;
2174	LIST_HEAD(event_list);
2175
2176	sdev = container_of(work, struct scsi_device, event_work);
2177
2178	while (1) {
2179		struct scsi_event *evt;
2180		struct list_head *this, *tmp;
2181		unsigned long flags;
2182
2183		spin_lock_irqsave(&sdev->list_lock, flags);
2184		list_splice_init(&sdev->event_list, &event_list);
2185		spin_unlock_irqrestore(&sdev->list_lock, flags);
2186
2187		if (list_empty(&event_list))
2188			break;
2189
2190		list_for_each_safe(this, tmp, &event_list) {
2191			evt = list_entry(this, struct scsi_event, node);
2192			list_del(&evt->node);
2193			scsi_evt_emit(sdev, evt);
2194			kfree(evt);
2195		}
2196	}
2197}
2198
2199/**
2200 * 	sdev_evt_send - send asserted event to uevent thread
2201 *	@sdev: scsi_device event occurred on
2202 *	@evt: event to send
2203 *
2204 *	Assert scsi device event asynchronously.
2205 */
2206void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2207{
2208	unsigned long flags;
2209
2210#if 0
2211	/* FIXME: currently this check eliminates all media change events
2212	 * for polled devices.  Need to update to discriminate between AN
2213	 * and polled events */
2214	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2215		kfree(evt);
2216		return;
2217	}
2218#endif
2219
2220	spin_lock_irqsave(&sdev->list_lock, flags);
2221	list_add_tail(&evt->node, &sdev->event_list);
2222	schedule_work(&sdev->event_work);
2223	spin_unlock_irqrestore(&sdev->list_lock, flags);
2224}
2225EXPORT_SYMBOL_GPL(sdev_evt_send);
2226
2227/**
2228 * 	sdev_evt_alloc - allocate a new scsi event
2229 *	@evt_type: type of event to allocate
2230 *	@gfpflags: GFP flags for allocation
2231 *
2232 *	Allocates and returns a new scsi_event.
2233 */
2234struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2235				  gfp_t gfpflags)
2236{
2237	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2238	if (!evt)
2239		return NULL;
2240
2241	evt->evt_type = evt_type;
2242	INIT_LIST_HEAD(&evt->node);
2243
2244	/* evt_type-specific initialization, if any */
2245	switch (evt_type) {
2246	case SDEV_EVT_MEDIA_CHANGE:
2247	default:
2248		/* do nothing */
2249		break;
2250	}
2251
2252	return evt;
2253}
2254EXPORT_SYMBOL_GPL(sdev_evt_alloc);
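/*
 * Illustrative sketch (not compiled): the two-step form of event reporting,
 * pre-allocating the event in a context that may sleep and queueing it with
 * sdev_evt_send(), which is safe from atomic context.  The example_* helper
 * is invented for illustration only.
 */
#if 0
static void example_queue_media_event(struct scsi_device *sdev)
{
	struct scsi_event *evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE,
						GFP_KERNEL);

	if (evt)
		sdev_evt_send(sdev, evt);
}
#endif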
2255
2256/**
2257 * 	sdev_evt_send_simple - send asserted event to uevent thread
2258 *	@sdev: scsi_device event occurred on
2259 *	@evt_type: type of event to send
2260 *	@gfpflags: GFP flags for allocation
2261 *
2262 *	Assert scsi device event asynchronously, given an event type.
2263 */
2264void sdev_evt_send_simple(struct scsi_device *sdev,
2265			  enum scsi_device_event evt_type, gfp_t gfpflags)
2266{
2267	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2268	if (!evt) {
2269		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2270			    evt_type);
2271		return;
2272	}
2273
2274	sdev_evt_send(sdev, evt);
2275}
2276EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
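/*
 * Illustrative sketch (not compiled): the one-shot helper above is the
 * common way for a driver to raise a media-change event; allocation failure
 * is logged by the helper itself.  The example_* helper is invented for
 * illustration only.
 */
#if 0
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}
#endif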
2277
2278/**
2279 *	scsi_device_quiesce - Block user issued commands.
2280 *	@sdev:	scsi device to quiesce.
2281 *
2282 *	This works by trying to transition to the SDEV_QUIESCE state
2283 *	(which must be a legal transition).  When the device is in this
2284 *	state, only special requests will be accepted, all others will
2285 *	be deferred.  Since special requests may also be requeued requests,
2286 *	a successful return doesn't guarantee the device will be
2287 *	totally quiescent.
2288 *
2289 *	Must be called with user context, may sleep.
2290 *
2291 *	Returns zero if successful, or an error if the transition fails.
2292 */
2293int
2294scsi_device_quiesce(struct scsi_device *sdev)
2295{
2296	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2297	if (err)
2298		return err;
2299
2300	scsi_run_queue(sdev->request_queue);
2301	while (sdev->device_busy) {
2302		msleep_interruptible(200);
2303		scsi_run_queue(sdev->request_queue);
2304	}
2305	return 0;
2306}
2307EXPORT_SYMBOL(scsi_device_quiesce);
2308
2309/**
2310 *	scsi_device_resume - Restart user issued commands to a quiesced device.
2311 *	@sdev:	scsi device to resume.
2312 *
2313 *	Moves the device from quiesced back to running and restarts the
2314 *	queues.
2315 *
2316 *	Must be called with user context, may sleep.
2317 */
2318void
2319scsi_device_resume(struct scsi_device *sdev)
2320{
2321	if (scsi_device_set_state(sdev, SDEV_RUNNING))
2322		return;
2323	scsi_run_queue(sdev->request_queue);
2324}
2325EXPORT_SYMBOL(scsi_device_resume);
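/*
 * Illustrative sketch (not compiled): the quiesce/resume bracket a caller
 * might place around work that must not compete with user I/O, e.g. a
 * transport class probing the device with internal commands.  The
 * example_* helper is invented for illustration only.
 */
#if 0
static void example_probe_without_user_io(struct scsi_device *sdev)
{
	if (scsi_device_quiesce(sdev))
		return;		/* transition refused; device left untouched */

	/* ... issue internal commands, e.g. via scsi_execute_req() ... */

	scsi_device_resume(sdev);
}
#endif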
2326
2327static void
2328device_quiesce_fn(struct scsi_device *sdev, void *data)
2329{
2330	scsi_device_quiesce(sdev);
2331}
2332
2333void
2334scsi_target_quiesce(struct scsi_target *starget)
2335{
2336	starget_for_each_device(starget, NULL, device_quiesce_fn);
2337}
2338EXPORT_SYMBOL(scsi_target_quiesce);
2339
2340static void
2341device_resume_fn(struct scsi_device *sdev, void *data)
2342{
2343	scsi_device_resume(sdev);
2344}
2345
2346void
2347scsi_target_resume(struct scsi_target *starget)
2348{
2349	starget_for_each_device(starget, NULL, device_resume_fn);
2350}
2351EXPORT_SYMBOL(scsi_target_resume);
2352
2353/**
2354 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2355 * @sdev:	device to block
2356 *
2357 * Block request made by SCSI LLDs to temporarily stop all
2358 * SCSI commands on the specified device.  Called from interrupt
2359 * or normal process context.
2360 *
2361 * Returns zero if successful, or an error if not.
2362 *
2363 * Notes:
2364 *	This routine transitions the device to the SDEV_BLOCK state
2365 *	(which must be a legal transition).  When the device is in this
2366 *	state, all commands are deferred until the SCSI LLD reenables
2367 *	the device with scsi_internal_device_unblock() or device_block_tmo fires.
2368 *	This routine assumes the host_lock is held on entry.
2369 */
2370int
2371scsi_internal_device_block(struct scsi_device *sdev)
2372{
2373	struct request_queue *q = sdev->request_queue;
2374	unsigned long flags;
2375	int err = 0;
2376
2377	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2378	if (err) {
2379		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2380
2381		if (err)
2382			return err;
2383	}
2384
2385	/*
2386	 * The device has transitioned to SDEV_BLOCK.  Stop the
2387	 * block layer from calling the midlayer with this device's
2388	 * request queue.
2389	 */
2390	spin_lock_irqsave(q->queue_lock, flags);
2391	blk_stop_queue(q);
2392	spin_unlock_irqrestore(q->queue_lock, flags);
2393
2394	return 0;
2395}
2396EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2397
2398/**
2399 * scsi_internal_device_unblock - resume a device after a block request
2400 * @sdev:	device to resume
2401 *
2402 * Called by SCSI LLDs or the midlayer to restart the device queue
2403 * for the previously suspended SCSI device.  Called from interrupt or
2404 * normal process context.
2405 *
2406 * Returns zero if successful, or an error if not.
2407 *
2408 * Notes:
2409 *	This routine transitions the device to the SDEV_RUNNING state
2410 *	(which must be a legal transition) allowing the midlayer to
2411 *	goose the queue for this device.  This routine assumes the
2412 *	host_lock is held upon entry.
2413 */
2414int
2415scsi_internal_device_unblock(struct scsi_device *sdev)
2416{
2417	struct request_queue *q = sdev->request_queue;
2418	unsigned long flags;
2419
2420	/*
2421	 * Try to transition the scsi device to SDEV_RUNNING
2422	 * and goose the device queue if successful.
2423	 */
2424	if (sdev->sdev_state == SDEV_BLOCK)
2425		sdev->sdev_state = SDEV_RUNNING;
2426	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2427		sdev->sdev_state = SDEV_CREATED;
2428	else
2429		return -EINVAL;
2430
2431	spin_lock_irqsave(q->queue_lock, flags);
2432	blk_start_queue(q);
2433	spin_unlock_irqrestore(q->queue_lock, flags);
2434
2435	return 0;
2436}
2437EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
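/*
 * Illustrative sketch (not compiled): an LLD pausing a device around a
 * firmware reset using the block/unblock pair above.  Per the comments
 * above, the host_lock is held across each state change.  The example_*
 * helper is invented for illustration only.
 */
#if 0
static void example_pause_for_reset(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_internal_device_block(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* ... reset and reinitialize the hardware ... */

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_internal_device_unblock(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
#endif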
2438
2439static void
2440device_block(struct scsi_device *sdev, void *data)
2441{
2442	scsi_internal_device_block(sdev);
2443}
2444
2445static int
2446target_block(struct device *dev, void *data)
2447{
2448	if (scsi_is_target_device(dev))
2449		starget_for_each_device(to_scsi_target(dev), NULL,
2450					device_block);
2451	return 0;
2452}
2453
2454void
2455scsi_target_block(struct device *dev)
2456{
2457	if (scsi_is_target_device(dev))
2458		starget_for_each_device(to_scsi_target(dev), NULL,
2459					device_block);
2460	else
2461		device_for_each_child(dev, NULL, target_block);
2462}
2463EXPORT_SYMBOL_GPL(scsi_target_block);
2464
2465static void
2466device_unblock(struct scsi_device *sdev, void *data)
2467{
2468	scsi_internal_device_unblock(sdev);
2469}
2470
2471static int
2472target_unblock(struct device *dev, void *data)
2473{
2474	if (scsi_is_target_device(dev))
2475		starget_for_each_device(to_scsi_target(dev), NULL,
2476					device_unblock);
2477	return 0;
2478}
2479
2480void
2481scsi_target_unblock(struct device *dev)
2482{
2483	if (scsi_is_target_device(dev))
2484		starget_for_each_device(to_scsi_target(dev), NULL,
2485					device_unblock);
2486	else
2487		device_for_each_child(dev, NULL, target_unblock);
2488}
2489EXPORT_SYMBOL_GPL(scsi_target_unblock);
2490
2491/**
2492 * scsi_kmap_atomic_sg - find and atomically map an sg element
2493 * @sgl:	scatter-gather list
2494 * @sg_count:	number of segments in sg
2495 * @offset:	offset in bytes into sg, on return offset into the mapped area
2496 * @len:	bytes to map, on return number of bytes mapped
2497 *
2498 * Returns virtual address of the start of the mapped page
2499 */
2500void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2501			  size_t *offset, size_t *len)
2502{
2503	int i;
2504	size_t sg_len = 0, len_complete = 0;
2505	struct scatterlist *sg;
2506	struct page *page;
2507
2508	WARN_ON(!irqs_disabled());
2509
2510	for_each_sg(sgl, sg, sg_count, i) {
2511		len_complete = sg_len; /* Complete sg-entries */
2512		sg_len += sg->length;
2513		if (sg_len > *offset)
2514			break;
2515	}
2516
2517	if (unlikely(i == sg_count)) {
2518		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2519			"elements %d\n",
2520		       __func__, sg_len, *offset, sg_count);
2521		WARN_ON(1);
2522		return NULL;
2523	}
2524
2525	/* Offset starting from the beginning of first page in this sg-entry */
2526	*offset = *offset - len_complete + sg->offset;
2527
2528	/* Assumption: contiguous pages can be accessed as "page + i" */
2529	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2530	*offset &= ~PAGE_MASK;
2531
2532	/* Bytes in this sg-entry from *offset to the end of the page */
2533	sg_len = PAGE_SIZE - *offset;
2534	if (*len > sg_len)
2535		*len = sg_len;
2536
2537	return kmap_atomic(page, KM_BIO_SRC_IRQ);
2538}
2539EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2540
2541/**
2542 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2543 * @virt:	virtual address to be unmapped
2544 */
2545void scsi_kunmap_atomic_sg(void *virt)
2546{
2547	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2548}
2549EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
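/*
 * Illustrative sketch (not compiled): copying a few bytes out of a
 * scatterlist with the atomic-map helpers above.  Interrupts must be
 * disabled across the mapping, and *offset/*len are in/out parameters, so
 * the mapping may cover less than was asked for; this sketch assumes the
 * requested bytes fit within the single mapped page.  The example_* helper
 * is invented for illustration only.
 */
#if 0
static void example_peek_sg(struct scatterlist *sgl, int sg_count,
			    size_t byte_offset, void *to, size_t count)
{
	unsigned long flags;
	size_t offset = byte_offset;
	size_t len = count;
	void *vaddr;

	local_irq_save(flags);
	vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
	if (vaddr) {
		/* len may have been clamped to the end of the mapped page */
		memcpy(to, (char *)vaddr + offset, len);
		scsi_kunmap_atomic_sg(vaddr);
	}
	local_irq_restore(flags);
}
#endif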
2550