scsi_lib.c revision beb40487508290f5d6565598c60a3f44261beef2
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
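
/*
 * Usage sketch (illustrative, not part of this file): commands normally
 * land here because a low-level driver's queuecommand hook refused them;
 * the midlayer then requeues the command via scsi_queue_insert() on the
 * driver's behalf.  The helper names below are hypothetical.
 *
 *	static int example_queuecommand(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		if (example_adapter_queue_full(cmd->device->host))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		if (example_lun_reported_queue_full(cmd->device))
 *			return SCSI_MLQUEUE_DEVICE_BUSY;
 *		...
 *	}
 */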

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	length of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 *
 * returns the req->errors value, which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
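
/*
 * Usage sketch (illustrative, not part of this file): issue a six-byte
 * INQUIRY through scsi_execute().  The 36-byte transfer length and the
 * timeout/retries values are arbitrary choices for the example.
 *
 *	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *	int result;
 *
 *	result = scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, 36,
 *			      NULL, 30 * HZ, 3, 0);
 *	if (result)
 *		...	(non-zero is the scsi_cmnd result field)
 */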


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
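
/*
 * Usage sketch (illustrative, not part of this file): the same command
 * via scsi_execute_req(), letting the midlayer collect sense data and
 * decode it into a scsi_sense_hdr.
 *
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 36,
 *				  &sshdr, 30 * HZ, 3);
 *	if (result && scsi_sense_valid(&sshdr))
 *		...	(inspect sshdr.sense_key / asc / ascq)
 */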

struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static kmem_cache_t *scsi_io_context_cache;

static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!q->back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->hard_nr_sectors += bio_sectors(bio);
		rq->nr_sectors = rq->hard_nr_sectors;
	}

	return 0;
}

static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	return 0;
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sgl:	scatterlist
 * @nsegs:	number of elements
 * @bufflen:	length of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer. We do not trust the scatterlist
 * sent to us, as some ULDs use that struct only to organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = 0, len, bytes, off;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for (i = 0; i < nsegs; i++) {
		page = sgl[i].page;
		off = sgl[i].offset;
		len = sgl[i].length;
		data_len += len;

		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, bio->bi_size, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = data_len;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, bio->bi_size, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: data direction
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	length of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @privdata:	data passed to done()
 * @done:	callback function called when the request completes
 * @gfp:	memory allocation flags
 **/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
		       int use_sg, int timeout, int retries, void *privdata,
		       void (*done)(void *, char *, int, int), gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;
	memset(sioc, 0, sizeof(*sioc));

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->flags |= REQ_BLOCK_PC | REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kfree(sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);
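
/*
 * Usage sketch (illustrative, not part of this file): fire a command
 * without waiting and collect the result in a completion callback.
 * example_done() and the cookie pointer are hypothetical.
 *
 *	static void example_done(void *data, char *sense, int result, int resid)
 *	{
 *		...	(called on completion; do not sleep here)
 *	}
 *
 *	err = scsi_execute_async(sdev, cmd, COMMAND_SIZE(cmd[0]),
 *				 DMA_FROM_DEVICE, buf, bufflen, 0,
 *				 30 * HZ, 3, cookie, example_done, GFP_KERNEL);
 */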

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
		!((shost->can_queue > 0) &&
		  (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
					  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can happen for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}
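
/*
 * Worked example (assuming SCSI_MAX_PHYS_SEGMENTS > 32): a command with
 * use_sg == 40 hits the 33 ... 64 case above, so sglist_len becomes 3 and
 * the table is served from the "sgpool-64" mempool defined at the top of
 * this file.  scsi_free_sgtable() below reverses the mapping by index.
 */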

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *              good_bytes - number of bytes of I/O that completed successfully.
 *              block_bytes - size in bytes of the block to fail on error, if any.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */

		/*
		 * If the command completed without error, then either
		 * finish off the rest of the command, or start a new one.
		 */
		if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
			return;
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0,
						this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					   "Device not ready: ");
				scsi_print_sense_hdr("", &sshdr);
			}
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					   "Volume overflow, CDB: ");
				__scsi_print_command(cmd->data_cmnd);
				scsi_print_sense("", cmd);
			}
			scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	}			/* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->flags & REQ_QUIET)) {
			scmd_printk(KERN_INFO, cmd,
				   "SCSI error: return code = 0x%x\n", result);

			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment requests,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	/*
	 * This will complete the whole command with uptodate=1 so
	 * as far as the block layer is concerned the command completed
	 * successfully. Since this is a REQ_BLOCK_PC command the
	 * caller should check the request's errors value
	 */
	scsi_io_completion(cmd, cmd->bufflen, 0);
}

static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	cmd->timeout_per_command = req->timeout;
	cmd->done = scsi_blk_pc_done;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to offline device\n");
		goto kill;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state; don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			goto kill;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL && req->special) {
		cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
			if (specials_only == SDEV_QUIESCE ||
					specials_only == SDEV_BLOCK)
				goto defer;

			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to device being removed\n");
			goto kill;
		}


		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		goto kill;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 * (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).   This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		switch (ret) {
			/* For BLKPREP_KILL/DEFER the cmd was released */
		case BLKPREP_KILL:
			goto kill;
		case BLKPREP_DEFER:
			goto defer;
		}

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (req->flags & REQ_BLOCK_PC) {
			scsi_setup_blk_pc_cmnd(cmd);
		} else if (req->rq_disk) {
			struct scsi_driver *drv;

			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				goto kill;
			}
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
 kill:
	req->errors = DID_NO_CONNECT << 16;
	return BLKPREP_KILL;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, request_queue_t *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct Scsi_Host *shost;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
				 __FUNCTION__);
		BUG();
	}

	/* only dereference cmd now that we know it is non-NULL */
	sdev = cmd->device;
	shost = sdev->host;

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	__scsi_done(cmd);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->completion_data;
	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
		case SUCCESS:
			scsi_finish_command(cmd);
			break;
		case NEEDS_RETRY:
			scsi_retry_command(cmd);
			break;
		case ADD_TO_MLQUEUE:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
			break;
		default:
			if (!scsi_eh_scmd_add(cmd, 0))
				scsi_finish_command(cmd);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
1698 *		commands from being queued to the device.
1699 *
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
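
/*
 * Usage sketch (illustrative, not part of this file): a low-level driver
 * fencing its host off while it performs an internal reset.
 * example_reset_hardware() is a hypothetical LLDD helper.
 *
 *	scsi_block_requests(shost);
 *	example_reset_hardware(shost);
 *	scsi_unblock_requests(shost);	(this also reruns all the queues)
 */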

int __init scsi_init_queue(void)
{
	int i;

	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
					sizeof(struct scsi_io_context),
					0, 0, NULL, NULL);
	if (!scsi_io_context_cache) {
		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_io_context_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put the decoded sense header (or NULL if no sense
 *		is to be collected).
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
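
/*
 * Usage sketch (illustrative, not part of this file): write a mode page
 * back to the device after modifying it.  page_buf/page_len and the
 * timeout/retries values are hypothetical; @data would normally be the
 * scsi_mode_data filled in by an earlier scsi_mode_sense() call.
 *
 *	struct scsi_sense_hdr sshdr;
 *	int ret;
 *
 *	ret = scsi_mode_select(sdev, 1, 0, 8, page_buf, page_len,
 *			       5 * HZ, 3, &data, &sshdr);
 *	if (ret)
 *		...	(negative errno or SCSI status)
 */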

/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to
 *		six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put the decoded sense header (or NULL if no sense
 *		is to be collected).
 *
 *	Returns zero if successful; on success the header length (either
 *	4 or 8, depending on whether a six or ten byte command was issued)
 *	is returned in data->header_length.
1862 **/
1863int
1864scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1865		  unsigned char *buffer, int len, int timeout, int retries,
1866		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1867{
1868	unsigned char cmd[12];
1869	int use_10_for_ms;
1870	int header_length;
1871	int result;
1872	struct scsi_sense_hdr my_sshdr;
1873
1874	memset(data, 0, sizeof(*data));
1875	memset(&cmd[0], 0, 12);
1876	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
1877	cmd[2] = modepage;
1878
1879	/* caller might not be interested in sense, but we need it */
1880	if (!sshdr)
1881		sshdr = &my_sshdr;
1882
1883 retry:
1884	use_10_for_ms = sdev->use_10_for_ms;
1885
1886	if (use_10_for_ms) {
1887		if (len < 8)
1888			len = 8;
1889
1890		cmd[0] = MODE_SENSE_10;
1891		cmd[8] = len;
1892		header_length = 8;
1893	} else {
1894		if (len < 4)
1895			len = 4;
1896
1897		cmd[0] = MODE_SENSE;
1898		cmd[4] = len;
1899		header_length = 4;
1900	}
1901
1902	memset(buffer, 0, len);
1903
1904	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1905				  sshdr, timeout, retries);
1906
1907	/* An ILLEGAL REQUEST sense return here can mean either that the
1908	 * device doesn't support the ten byte MODE SENSE opcode, or
1909	 * merely that the requested mode page isn't supported; check the
1910	 * ASC/ASCQ before falling back to the six byte command */
1911
1912	if (use_10_for_ms && !scsi_status_is_good(result) &&
1913	    (driver_byte(result) & DRIVER_SENSE)) {
1914		if (scsi_sense_valid(sshdr)) {
1915			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1916			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1917				/*
1918				 * Invalid command operation code
1919				 */
1920				sdev->use_10_for_ms = 0;
1921				goto retry;
1922			}
1923		}
1924	}
1925
1926	if (scsi_status_is_good(result)) {
1927		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1928			     (modepage == 6 || modepage == 8))) {
1929			/* Initio breakage? */
1930			header_length = 0;
1931			data->length = 13;
1932			data->medium_type = 0;
1933			data->device_specific = 0;
1934			data->longlba = 0;
1935			data->block_descriptor_length = 0;
1936		} else if (use_10_for_ms) {
1937			data->length = buffer[0]*256 + buffer[1] + 2;
1938			data->medium_type = buffer[2];
1939			data->device_specific = buffer[3];
1940			data->longlba = buffer[4] & 0x01;
1941			data->block_descriptor_length = buffer[6]*256
1942				+ buffer[7];
1943		} else {
1944			data->length = buffer[0] + 1;
1945			data->medium_type = buffer[1];
1946			data->device_specific = buffer[2];
1947			data->block_descriptor_length = buffer[3];
1948		}
1949		data->header_length = header_length;
1950	}
1951
1952	return result;
1953}
1954EXPORT_SYMBOL(scsi_mode_sense);
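
/*
 * Usage note: on successful return from scsi_mode_sense() the buffer
 * is laid out as
 *
 *	buffer					mode parameter header
 *						(data->header_length bytes:
 *						8 for the ten byte command,
 *						4 for the six byte one)
 *	buffer + data->header_length		block descriptors
 *						(data->block_descriptor_length
 *						bytes, zero if DBD was set)
 *	buffer + data->header_length
 *	       + data->block_descriptor_length	the mode page(s) themselves
 *
 * with data->length the total mode data length reported by the device.
 *
 * Illustrative sketch of a typical round trip through both helpers,
 * modelled on what sd.c does for the caching page (0x08).  The buffer
 * size, timeout and retry count are arbitrary example values, and
 * example_set_write_cache is a hypothetical name.
 */
static int example_set_write_cache(struct scsi_device *sdev, int enable)
{
	unsigned char buffer[64];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	unsigned char *page;
	int len, sp, res;

	/* DBD set (0x08): ask for the caching page without block descriptors */
	res = scsi_mode_sense(sdev, 0x08, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	page = buffer + data.header_length + data.block_descriptor_length;
	len = data.length - data.header_length -
		data.block_descriptor_length;
	if (len < 3 || len > (int)sizeof(buffer) - data.header_length -
	    data.block_descriptor_length)
		return -EIO;

	sp = (page[0] & 0x80) ? 1 : 0;	/* preserve the PS (saveable) bit */
	page[0] &= ~0x80;		/* PS is reserved in MODE SELECT data */
	if (enable)
		page[2] |= 0x04;	/* set WCE */
	else
		page[2] &= ~0x04;	/* clear WCE */

	res = scsi_mode_select(sdev, 1, sp, 0x08, page, len,
			       30 * HZ, 3, &data, &sshdr);
	return scsi_status_is_good(res) ? 0 : -EIO;
}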
1955
1956int
1957scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1958{
1959	char cmd[] = {
1960		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1961	};
1962	struct scsi_sense_hdr sshdr;
1963	int result;
1964
1965	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1966				  timeout, retries);
1967
1968	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1969
1970		if ((scsi_sense_valid(&sshdr)) &&
1971		    ((sshdr.sense_key == UNIT_ATTENTION) ||
1972		     (sshdr.sense_key == NOT_READY))) {
1973			sdev->changed = 1;
1974			result = 0;
1975		}
1976	}
1977	return result;
1978}
1979EXPORT_SYMBOL(scsi_test_unit_ready);
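
/*
 * Illustrative sketch: waiting for a unit to become ready with the
 * helper above.  scsi_test_unit_ready() returns zero once the device
 * reports ready (and, for removable devices, also swallows UNIT
 * ATTENTION/NOT READY while flagging a media change).  The iteration
 * count, timeout and delay are arbitrary example values, and
 * example_wait_until_ready is a hypothetical name.
 */
static int example_wait_until_ready(struct scsi_device *sdev)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (scsi_test_unit_ready(sdev, 30 * HZ, 3) == 0)
			return 0;
		msleep(1000);	/* give the unit a moment to spin up */
	}
	return -ENODEV;
}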
1980
1981/**
1982 *	scsi_device_set_state - Take the given device through the device
1983 *		state model.
1984 *	@sdev:	scsi device to change the state of.
1985 *	@state:	state to change to.
1986 *
1987 *	Returns zero if successful, or -EINVAL if the requested
1988 *	transition is illegal.
1989 **/
1990int
1991scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1992{
1993	enum scsi_device_state oldstate = sdev->sdev_state;
1994
1995	if (state == oldstate)
1996		return 0;
1997
1998	switch (state) {
1999	case SDEV_CREATED:
2000		/* There are no legal states that come back to
2001		 * created.  This is the manually initialised start
2002		 * state */
2003		goto illegal;
2004
2005	case SDEV_RUNNING:
2006		switch (oldstate) {
2007		case SDEV_CREATED:
2008		case SDEV_OFFLINE:
2009		case SDEV_QUIESCE:
2010		case SDEV_BLOCK:
2011			break;
2012		default:
2013			goto illegal;
2014		}
2015		break;
2016
2017	case SDEV_QUIESCE:
2018		switch (oldstate) {
2019		case SDEV_RUNNING:
2020		case SDEV_OFFLINE:
2021			break;
2022		default:
2023			goto illegal;
2024		}
2025		break;
2026
2027	case SDEV_OFFLINE:
2028		switch (oldstate) {
2029		case SDEV_CREATED:
2030		case SDEV_RUNNING:
2031		case SDEV_QUIESCE:
2032		case SDEV_BLOCK:
2033			break;
2034		default:
2035			goto illegal;
2036		}
2037		break;
2038
2039	case SDEV_BLOCK:
2040		switch (oldstate) {
2041		case SDEV_CREATED:
2042		case SDEV_RUNNING:
2043			break;
2044		default:
2045			goto illegal;
2046		}
2047		break;
2048
2049	case SDEV_CANCEL:
2050		switch (oldstate) {
2051		case SDEV_CREATED:
2052		case SDEV_RUNNING:
2053		case SDEV_OFFLINE:
2054		case SDEV_BLOCK:
2055			break;
2056		default:
2057			goto illegal;
2058		}
2059		break;
2060
2061	case SDEV_DEL:
2062		switch (oldstate) {
2063		case SDEV_CANCEL:
2064			break;
2065		default:
2066			goto illegal;
2067		}
2068		break;
2069
2070	}
2071	sdev->sdev_state = state;
2072	return 0;
2073
2074 illegal:
2075	SCSI_LOG_ERROR_RECOVERY(1,
2076				sdev_printk(KERN_ERR, sdev,
2077					    "Illegal state transition %s->%s\n",
2078					    scsi_device_state_name(oldstate),
2079					    scsi_device_state_name(state))
2080				);
2081	return -EINVAL;
2082}
2083EXPORT_SYMBOL(scsi_device_set_state);
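
/*
 * Summary of the transitions accepted above, derived directly from the
 * switch statement:
 *
 *	target state	legal previous states
 *	------------	---------------------
 *	SDEV_CREATED	none (initial state only)
 *	SDEV_RUNNING	SDEV_CREATED, SDEV_OFFLINE, SDEV_QUIESCE, SDEV_BLOCK
 *	SDEV_QUIESCE	SDEV_RUNNING, SDEV_OFFLINE
 *	SDEV_OFFLINE	SDEV_CREATED, SDEV_RUNNING, SDEV_QUIESCE, SDEV_BLOCK
 *	SDEV_BLOCK	SDEV_CREATED, SDEV_RUNNING
 *	SDEV_CANCEL	SDEV_CREATED, SDEV_RUNNING, SDEV_OFFLINE, SDEV_BLOCK
 *	SDEV_DEL	SDEV_CANCEL
 */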
2084
2085/**
2086 *	scsi_device_quiesce - Block user issued commands.
2087 *	@sdev:	scsi device to quiesce.
2088 *
2089 *	This works by trying to transition to the SDEV_QUIESCE state
2090 *	(which must be a legal transition).  When the device is in this
2091 *	state, only special requests will be accepted, all others will
2092 *	be deferred.  Since special requests may also be requeued requests,
2093 *	a successful return doesn't guarantee the device will be
2094 *	totally quiescent.
2095 *
2096 *	Must be called with user context, may sleep.
2097 *
2098 *	Returns zero if successful or an error if not.
2099 **/
2100int
2101scsi_device_quiesce(struct scsi_device *sdev)
2102{
2103	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2104	if (err)
2105		return err;
2106
2107	scsi_run_queue(sdev->request_queue);
2108	while (sdev->device_busy) {
2109		msleep_interruptible(200);
2110		scsi_run_queue(sdev->request_queue);
2111	}
2112	return 0;
2113}
2114EXPORT_SYMBOL(scsi_device_quiesce);
2115
2116/**
2117 *	scsi_device_resume - Restart user issued commands to a quiesced device.
2118 *	@sdev:	scsi device to resume.
2119 *
2120 *	Moves the device from quiesced back to running and restarts the
2121 *	queues.
2122 *
2123 *	Must be called with user context, may sleep.
2124 **/
2125void
2126scsi_device_resume(struct scsi_device *sdev)
2127{
2128	if (scsi_device_set_state(sdev, SDEV_RUNNING))
2129		return;
2130	scsi_run_queue(sdev->request_queue);
2131}
2132EXPORT_SYMBOL(scsi_device_resume);
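
/*
 * Illustrative sketch: the usual pairing of the two calls above around
 * work that needs a quiet device (the SPI transport class quiesces
 * around domain validation, for example).  example_do_work is a
 * hypothetical callback.
 */
static int example_run_quiesced(struct scsi_device *sdev,
				int (*example_do_work)(struct scsi_device *))
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;
	err = example_do_work(sdev);	/* only special requests run now */
	scsi_device_resume(sdev);
	return err;
}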
2133
2134static void
2135device_quiesce_fn(struct scsi_device *sdev, void *data)
2136{
2137	scsi_device_quiesce(sdev);
2138}
2139
2140void
2141scsi_target_quiesce(struct scsi_target *starget)
2142{
2143	starget_for_each_device(starget, NULL, device_quiesce_fn);
2144}
2145EXPORT_SYMBOL(scsi_target_quiesce);
2146
2147static void
2148device_resume_fn(struct scsi_device *sdev, void *data)
2149{
2150	scsi_device_resume(sdev);
2151}
2152
2153void
2154scsi_target_resume(struct scsi_target *starget)
2155{
2156	starget_for_each_device(starget, NULL, device_resume_fn);
2157}
2158EXPORT_SYMBOL(scsi_target_resume);
2159
2160/**
2161 * scsi_internal_device_block - internal function to put a device
2162 *				temporarily into the SDEV_BLOCK state
2163 * @sdev:	device to block
2164 *
2165 * Block request made by scsi LLDs to temporarily stop all
2166 * scsi commands on the specified device.  Called from interrupt
2167 * or normal process context.
2168 *
2169 * Returns zero if successful or error if not
2170 *
2171 * Notes:
2172 *	This routine transitions the device to the SDEV_BLOCK state
2173 *	(which must be a legal transition).  When the device is in this
2174 *	state, all commands are deferred until the scsi LLD reenables
2175 *	the device with scsi_internal_device_unblock() or device_block_tmo fires.
2176 *	This routine assumes the host_lock is held on entry.
2177 **/
2178int
2179scsi_internal_device_block(struct scsi_device *sdev)
2180{
2181	request_queue_t *q = sdev->request_queue;
2182	unsigned long flags;
2183	int err = 0;
2184
2185	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2186	if (err)
2187		return err;
2188
2189	/*
2190	 * The device has transitioned to SDEV_BLOCK.  Stop the
2191	 * block layer from calling the midlayer with this device's
2192	 * request queue.
2193	 */
2194	spin_lock_irqsave(q->queue_lock, flags);
2195	blk_stop_queue(q);
2196	spin_unlock_irqrestore(q->queue_lock, flags);
2197
2198	return 0;
2199}
2200EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2201
2202/**
2203 * scsi_internal_device_unblock - resume a device after a block request
2204 * @sdev:	device to resume
2205 *
2206 * Called by scsi LLDs or the midlayer to restart the device queue
2207 * for the previously suspended scsi device.  Called from interrupt or
2208 * normal process context.
2209 *
2210 * Returns zero if successful or error if not.
2211 *
2212 * Notes:
2213 *	This routine transitions the device to the SDEV_RUNNING state
2214 *	(which must be a legal transition) allowing the midlayer to
2215 *	goose the queue for this device.  This routine assumes the
2216 *	host_lock is held upon entry.
2217 **/
2218int
2219scsi_internal_device_unblock(struct scsi_device *sdev)
2220{
2221	request_queue_t *q = sdev->request_queue;
2222	int err;
2223	unsigned long flags;
2224
2225	/*
2226	 * Try to transition the scsi device to SDEV_RUNNING
2227	 * and goose the device queue if successful.
2228	 */
2229	err = scsi_device_set_state(sdev, SDEV_RUNNING);
2230	if (err)
2231		return err;
2232
2233	spin_lock_irqsave(q->queue_lock, flags);
2234	blk_start_queue(q);
2235	spin_unlock_irqrestore(q->queue_lock, flags);
2236
2237	return 0;
2238}
2239EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
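
/*
 * Illustrative sketch: how an LLD might bracket a device reset with
 * the block/unblock pair above.  Both routines assume host_lock is
 * held on entry, so it is taken around each call.
 * example_reset_hardware is a hypothetical driver function.
 */
static int example_block_around_reset(struct scsi_device *sdev,
				      void (*example_reset_hardware)(struct scsi_device *))
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;
	int err;

	spin_lock_irqsave(shost->host_lock, flags);
	err = scsi_internal_device_block(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);
	if (err)
		return err;

	example_reset_hardware(sdev);	/* no commands reach the LLD meanwhile */

	spin_lock_irqsave(shost->host_lock, flags);
	err = scsi_internal_device_unblock(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return err;
}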
2240
2241static void
2242device_block(struct scsi_device *sdev, void *data)
2243{
2244	scsi_internal_device_block(sdev);
2245}
2246
2247static int
2248target_block(struct device *dev, void *data)
2249{
2250	if (scsi_is_target_device(dev))
2251		starget_for_each_device(to_scsi_target(dev), NULL,
2252					device_block);
2253	return 0;
2254}
2255
2256void
2257scsi_target_block(struct device *dev)
2258{
2259	if (scsi_is_target_device(dev))
2260		starget_for_each_device(to_scsi_target(dev), NULL,
2261					device_block);
2262	else
2263		device_for_each_child(dev, NULL, target_block);
2264}
2265EXPORT_SYMBOL_GPL(scsi_target_block);
2266
2267static void
2268device_unblock(struct scsi_device *sdev, void *data)
2269{
2270	scsi_internal_device_unblock(sdev);
2271}
2272
2273static int
2274target_unblock(struct device *dev, void *data)
2275{
2276	if (scsi_is_target_device(dev))
2277		starget_for_each_device(to_scsi_target(dev), NULL,
2278					device_unblock);
2279	return 0;
2280}
2281
2282void
2283scsi_target_unblock(struct device *dev)
2284{
2285	if (scsi_is_target_device(dev))
2286		starget_for_each_device(to_scsi_target(dev), NULL,
2287					device_unblock);
2288	else
2289		device_for_each_child(dev, NULL, target_unblock);
2290}
2291EXPORT_SYMBOL_GPL(scsi_target_unblock);
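
/*
 * Illustrative sketch: a transport class would typically block a whole
 * target when its port drops off the fabric and unblock it when the
 * port returns (the FC transport does this for remote ports); rport
 * here is a hypothetical name:
 *
 *	scsi_target_block(&rport->dev);
 *	... wait for the port to come back, or give up ...
 *	scsi_target_unblock(&rport->dev);
 */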
2292
2293/**
2294 * scsi_kmap_atomic_sg - find and atomically map an sg element
2295 * @sg:		scatter-gather list
2296 * @sg_count:	number of segments in sg
2297 * @offset:	offset in bytes into sg, on return offset into the mapped area
2298 * @len:	bytes to map, on return number of bytes mapped
2299 *
2300 * Returns virtual address of the start of the mapped page
2301 */
2302void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
2303			  size_t *offset, size_t *len)
2304{
2305	int i;
2306	size_t sg_len = 0, len_complete = 0;
2307	struct page *page;
2308
2309	for (i = 0; i < sg_count; i++) {
2310		len_complete = sg_len; /* Complete sg-entries */
2311		sg_len += sg[i].length;
2312		if (sg_len > *offset)
2313			break;
2314	}
2315
2316	if (unlikely(i == sg_count)) {
2317		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2318			"elements %d\n",
2319		       __FUNCTION__, sg_len, *offset, sg_count);
2320		WARN_ON(1);
2321		return NULL;
2322	}
2323
2324	/* Offset starting from the beginning of first page in this sg-entry */
2325	*offset = *offset - len_complete + sg[i].offset;
2326
2327	/* Assumption: contiguous pages can be accessed as "page + i" */
2328	page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT));
2329	*offset &= ~PAGE_MASK;
2330
2331	/* Bytes in this sg-entry from *offset to the end of the page */
2332	sg_len = PAGE_SIZE - *offset;
2333	if (*len > sg_len)
2334		*len = sg_len;
2335
2336	return kmap_atomic(page, KM_BIO_SRC_IRQ);
2337}
2338EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2339
2340/**
2341 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
2342 *			   mapped with scsi_kmap_atomic_sg
2343 * @virt:	virtual address to be unmapped
2344 */
2345void scsi_kunmap_atomic_sg(void *virt)
2346{
2347	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2348}
2349EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
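
/*
 * Illustrative sketch: copying bytes out of a scatterlist with the
 * map/unmap pair above.  scsi_kmap_atomic_sg() may map less than
 * requested (it stops at a page boundary), hence the loop.
 * example_copy_from_sg is a hypothetical name.
 */
static void example_copy_from_sg(struct scatterlist *sgl, int sg_count,
				 size_t offset, void *dest, size_t count)
{
	char *d = dest;

	while (count) {
		size_t off = offset, len = count;
		void *vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &off, &len);

		if (!vaddr)
			break;	/* offset was past the end of the list */
		memcpy(d, (char *)vaddr + off, len);
		scsi_kunmap_atomic_sg(vaddr);
		d += len;
		offset += len;
		count -= len;
	}
}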
2350