scsi_lib.c revision e537a36d528053f6b9dbe6c88e763e835c0d3517
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.   The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq);
	return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
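
/*
 * Illustrative sketch (not part of this file): a low-level driver's
 * queuecommand typically triggers the path above indirectly, by
 * returning one of the SCSI_MLQUEUE_* reasons; the midlayer then calls
 * scsi_queue_insert on its behalf.  A hypothetical LLDD might look
 * like this (example_hba and example_ring_full are made-up names):
 *
 *	static int example_queuecommand(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		struct example_hba *hba = cmd->device->host->hostdata[0];
 *
 *		if (example_ring_full(hba))
 *			return SCSI_MLQUEUE_HOST_BUSY; /* requeued for us */
 *		...
 *		return 0;
 *	}
 */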

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:	This function is only used for queueing requests for things
 *		like ioctls and character device requests - this is because
 *		we essentially just inject a request into the queue for the
 *		device.
 *
 *		In order to support the scsi_device_quiesce function, we
 *		now inject requests on the *head* of the device queue
 *		rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called by us or by
	 * the scsi_hosts[host].queuecommand() function; it in turn calls
	 * the completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);
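
/*
 * Illustrative sketch (not part of this file): how an upper-level user
 * might issue an asynchronous TEST UNIT READY through scsi_do_req.  The
 * completion callback runs from scsi_done context.  example_tur_done
 * and example_send_tur are hypothetical names for illustration.
 *
 *	static void example_tur_done(struct scsi_cmnd *cmd)
 *	{
 *		/* inspect cmd->result, then finish the scsi_request *\/
 *	}
 *
 *	static void example_send_tur(struct scsi_request *sreq)
 *	{
 *		unsigned char cmd[6] = { TEST_UNIT_READY, };
 *
 *		sreq->sr_data_direction = DMA_NONE;
 *		scsi_do_req(sreq, cmd, NULL, 0, example_tur_done,
 *			    10 * HZ, 3);
 *	}
 */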

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
	BUG_ON(!req->waiting);

	complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);
	struct request *req;

	if (bufflen)
		req = blk_rq_map_kern(sreq->sr_device->request_queue,
				      sreq->sr_data_direction == DMA_TO_DEVICE,
				      buffer, bufflen, __GFP_WAIT);
	else
		req = blk_get_request(sreq->sr_device->request_queue, READ,
				      __GFP_WAIT);
	req->flags |= REQ_NOMERGE;
	req->waiting = &wait;
	req->end_io = scsi_wait_req_end_io;
	req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
	req->sense = sreq->sr_sense_buffer;
	req->sense_len = 0;
	memcpy(req->cmd, cmnd, req->cmd_len);
	req->timeout = timeout;
	req->flags |= REQ_BLOCK_PC;
	req->rq_disk = NULL;
	blk_insert_request(sreq->sr_device->request_queue, req,
			   sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	sreq->sr_result = req->errors;
	if (req->errors)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	blk_put_request(req);
}

EXPORT_SYMBOL(scsi_wait_req);
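
/*
 * Illustrative sketch (not part of this file): a synchronous INQUIRY
 * issued through scsi_wait_req, roughly as the scanning code does it.
 * example_inquiry is a hypothetical name for illustration.
 *
 *	static int example_inquiry(struct scsi_request *sreq,
 *				   unsigned char *buf, unsigned len)
 *	{
 *		unsigned char cmd[6] = { INQUIRY, 0, 0, 0, len, 0 };
 *
 *		sreq->sr_data_direction = DMA_FROM_DEVICE;
 *		scsi_wait_req(sreq, cmd, buf, len, 5 * HZ, 3);
 *		return sreq->sr_result;
 *	}
 */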

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
		!((shost->can_queue > 0) &&
		  (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
					  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can happen for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	cmd->request->flags &= ~REQ_DONTPREP;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue)
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);

			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}
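
/*
 * Worked example (illustrative): suppose a 16-sector READ fails with a
 * medium error after the first 8 sectors transferred cleanly.  The
 * completion path first calls scsi_end_request(cmd, 1, 8 << 9, 1);
 * the leftover 8 sectors keep the request alive, so cmd is returned
 * and the remainder is stuck back at the head of the queue.  A later
 * call with uptodate == 0 on the failing chunk then marks those
 * sectors as errored.
 */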

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
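
/*
 * Worked example (illustrative): a request mapping to 24 physical
 * segments falls in the "17 ... 32" case, so sglist_len becomes 2 and
 * the table comes from the "sgpool-32" mempool, i.e. 32 *
 * sizeof(struct scatterlist) bytes of which only 24 entries are used.
 * The same index must later be handed back to scsi_free_sgtable.
 */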

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
		return;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */
		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

		/*
		 * If the command completed without error, then either finish off the
		 * rest of the command, or start a new one.
		 */
		if (result == 0 || cmd == NULL) {
			return;
		}
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				cmd = scsi_end_request(cmd, 0,
						this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if (cmd->device->use_10_for_rw &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				cmd = scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			printk(KERN_INFO "Device %s not ready.\n",
			       req->rq_disk ? req->rq_disk->disk_name : "");
			cmd = scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
			       cmd->device->host->host_no,
			       (int)cmd->device->channel,
			       (int)cmd->device->id, (int)cmd->device->lun);
			__scsi_print_command(cmd->data_cmnd);
			scsi_print_sense("", cmd);
			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	}			/* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->flags & REQ_SPECIAL))
			printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
			       "= 0x%x\n", cmd->device->host->host_no,
			       cmd->device->channel,
			       cmd->device->id,
			       cmd->device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			scsi_print_sense("", cmd);
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		cmd = scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);
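
/*
 * Worked example (illustrative): a READ_10 to a removable drive whose
 * medium was swapped typically completes with a CHECK CONDITION status
 * and sense key UNIT ATTENTION.  scsi_command_normalize_sense fills
 * sshdr, and the UNIT_ATTENTION branch above sets cmd->device->changed
 * and fails the request so the upper layers can revalidate the disc.
 */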

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt))
		return BLKPREP_DEFER;

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;

		if (drv->prepare_flush)
			return drv->prepare_flush(q, rq);
	}

	return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *flush_rq = rq->end_io_data;
	struct scsi_driver *drv;

	if (flush_rq->errors) {
		printk("scsi: barrier error, disabling flush support\n");
		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
	}

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
		drv->end_flush(q, rq);
	}
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static void scsi_generic_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
		       sdev->host->host_no, sdev->id, sdev->lun);
		return BLKPREP_KILL;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state; don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL && req->special) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
			if (specials_only == SDEV_QUIESCE ||
					specials_only == SDEV_BLOCK)
				return BLKPREP_DEFER;

			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}


		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		return BLKPREP_KILL;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 * (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).   This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		if (ret)	/* BLKPREP_KILL return also releases the command */
			return ret;

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (req->rq_disk) {
			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				return BLKPREP_KILL;
			}
		} else {
			memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
			if (rq_data_dir(req) == WRITE)
				cmd->sc_data_direction = DMA_TO_DEVICE;
			else if (req->data_len)
				cmd->sc_data_direction = DMA_FROM_DEVICE;
			else
				cmd->sc_data_direction = DMA_NONE;

			cmd->transfersize = req->data_len;
			cmd->allowed = 3;
			cmd->timeout_per_command = req->timeout;
			cmd->done = scsi_generic_done;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, elv_next_request() returns NULL, but the queue
	 * must be restarted, so we plug here if no completing command
	 * will automatically do that for us. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d (%d:%d) unblocking device at"
				       " zero depth\n", sdev->host->host_no,
				       sdev->id, sdev->lun));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (test_bit(SHOST_RECOVERY, &shost->shost_state))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
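
/*
 * Worked example (illustrative): after a QUEUE_FULL, scsi_queue_insert
 * sets device_blocked = max_device_blocked (3 by default).  With no
 * commands outstanding, each pass through scsi_dev_queue_ready then
 * decrements the counter and plugs the queue, so the device stalls
 * for two queue runs and is allowed a command again on the third.
 */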

/*
 * Kill requests for a dead device
 */
static void scsi_kill_requests(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(req);
		req->flags |= REQ_QUIET;
		while (end_that_request_first(req, 0, req->nr_sectors))
			;
		end_that_request_last(req);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		scsi_kill_requests(q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			blkdev_dequeue_request(req);
			req->flags |= REQ_QUIET;
			while (end_that_request_first(req, 0, req->nr_sectors))
				;
			end_that_request_last(req);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
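
/*
 * Worked example (illustrative): on a physically-addressed bus, a host
 * whose device reports a 64-bit DMA mask yields a bounce limit of
 * 0xffffffffffffffff, so no pages are bounced; a 32-bit mask (or no
 * mask at all, via the 0xffffffff default above) forces pages above
 * 4GB to be bounced, and unchecked_isa_dma caps the limit at the
 * 16MB ISA DMA boundary.
 */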

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/*
	 * ordered tags are superior to flush ordering
	 */
	if (shost->ordered_tag)
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	else if (shost->ordered_flush) {
		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
		q->prepare_flush_fn = scsi_prepare_flush_fn;
		q->end_flush_fn = scsi_end_flush_fn;
	}

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
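
/*
 * Illustrative sketch (not part of this file): an LLDD typically
 * brackets a chip reset with these calls so the midlayer stops feeding
 * it commands.  example_reset_hardware is a hypothetical helper.
 *
 *	static void example_reset(struct Scsi_Host *shost)
 *	{
 *		scsi_block_requests(shost);
 *		example_reset_hardware(shost);
 *		scsi_unblock_requests(shost);
 *	}
 */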

int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	__scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sreq:	SCSI request to fill in with the MODE_SENSE
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *
 *	Returns the command's result code; zero indicates success.  On
 *	success, @data describes the mode header, whose length is either
 *	4 or 8 depending on whether a six or ten byte command was issued.
 **/
int
__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

 retry:
	use_10_for_ms = sreq->sr_device->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	sreq->sr_cmd_len = 0;
	memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
	sreq->sr_data_direction = DMA_FROM_DEVICE;

	memset(buffer, 0, len);

	scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
	    (driver_byte(sreq->sr_result) & DRIVER_SENSE)) {
		struct scsi_sense_hdr sshdr;

		if (scsi_request_normalize_sense(sreq, &sshdr)) {
			if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
			    (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sreq->sr_device->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(sreq->sr_result)) {
		data->header_length = header_length;
		if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
	}

	return sreq->sr_result;
}
EXPORT_SYMBOL(__scsi_mode_sense);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sdev:	scsi device to send command to.
 *	@dbd:	set if mode sense will disable block descriptors in the return
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *
 *	Returns the result of __scsi_mode_sense (zero indicates success),
 *	or -1 if no request could be allocated.
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	int ret;

	if (!sreq)
		return -1;

	ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
				timeout, retries, data);

	scsi_release_request(sreq);

	return ret;
}
EXPORT_SYMBOL(scsi_mode_sense);
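
/*
 * Illustrative sketch (not part of this file): reading the caching
 * mode page (0x08), roughly as sd does when probing write-cache state.
 * The buffer size and offset math are illustrative assumptions.
 *
 *	unsigned char buf[255];
 *	struct scsi_mode_data data;
 *
 *	if (scsi_status_is_good(scsi_mode_sense(sdev, 0, 0x08, buf,
 *				sizeof(buf), 5 * HZ, 3, &data))) {
 *		unsigned char *page = buf + data.header_length +
 *				      data.block_descriptor_length;
 *		/* page[2] & 0x04 is the WCE bit *\/
 *	}
 */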

int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	struct scsi_request *sreq;
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	sreq->sr_data_direction = DMA_NONE;
	scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);

	if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) {
		struct scsi_sense_hdr sshdr;

		if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			sreq->sr_result = 0;
		}
	}
	result = sreq->sr_result;
	scsi_release_request(sreq);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				dev_printk(KERN_ERR, &sdev->sdev_gendev,
					   "Illegal state transition %s->%s\n",
					   scsi_device_state_name(oldstate),
					   scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
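
/*
 * Worked example (illustrative): a freshly scanned device moves
 * SDEV_CREATED -> SDEV_RUNNING, and removal goes through SDEV_CANCEL
 * -> SDEV_DEL.  Asking for SDEV_RUNNING -> SDEV_DEL directly skips
 * SDEV_CANCEL, so the call logs the illegal transition and returns
 * -EINVAL, leaving sdev_state unchanged.
 */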

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
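
/*
 * Illustrative sketch (not part of this file): pausing user I/O around
 * an operation that must see a quiet device, e.g. a firmware download.
 * example_send_microcode is a hypothetical helper.
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		example_send_microcode(sdev);
 *		scsi_device_resume(sdev);
 *	}
 */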

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_internal_device_unblock or device_block_tmo
 *	fires.  This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
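
/*
 * Illustrative sketch (not part of this file): a transport class might
 * block a device while its remote port is gone and unblock it when it
 * returns.  example_rport_gone/example_rport_back are hypothetical
 * callbacks.
 *
 *	static void example_rport_gone(struct scsi_device *sdev)
 *	{
 *		/* caller holds shost->host_lock, as required above *\/
 *		scsi_internal_device_block(sdev);
 *	}
 *
 *	static void example_rport_back(struct scsi_device *sdev)
 *	{
 *		scsi_internal_device_unblock(sdev);
 *	}
 */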

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
2039