scsi_lib.c revision 99c84dbdc73d158a1ab955a4a5f74c18074796a3
/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE         2

struct scsi_host_sg_pool {
        size_t                  size;
        char                    *name;
        struct kmem_cache       *slab;
        mempool_t               *pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
        SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
        SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
        SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
        SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

static struct kmem_cache *scsi_bidi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_unprep_request()
 *
 * Purpose:     Remove all preparation done for a request, including its
 *              associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:   req     - request to unprepare
 *
 * Lock status: Assumed that no locks are held upon entry.
 *
 * Returns:     Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
        struct scsi_cmnd *cmd = req->special;

        req->cmd_flags &= ~REQ_DONTPREP;
        req->special = NULL;

        scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    len of buffer
 * @sense:      optional sense buffer
 * @timeout:    request timeout in seconds
 * @retries:    number of times to retry request
 * @flags:      or into request flags;
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                       buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->retries = retries;
        req->timeout = timeout;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
        char *sense = NULL;
        int result;

        if (sshdr) {
                sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
                if (!sense)
                        return DRIVER_ERROR << 24;
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
                              sense, timeout, retries, 0);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

        kfree(sense);
        return result;
}
EXPORT_SYMBOL(scsi_execute_req);

struct scsi_io_context {
        void *data;
        void (*done)(void *data, char *sense, int result, int resid);
        char sense[SCSI_SENSE_BUFFERSIZE];
};

static struct kmem_cache *scsi_io_context_cache;

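/*
 * Illustrative sketch (not part of scsi_lib.c): one way caller code might
 * use scsi_execute_req() above to issue a TEST UNIT READY and look at the
 * decoded sense data.  The function name, the 30-second timeout and the
 * retry count are assumptions chosen for the example; error handling is
 * deliberately minimal.  Kept under "#if 0" so it is clearly example text.
 */
#if 0   /* example only */
static int example_test_unit_ready(struct scsi_device *sdev)
{
        unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
        struct scsi_sense_hdr sshdr;
        int result;

        /* no data transfer; sense is normalized into sshdr for us */
        result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
                                  30 * HZ, 3);
        if (result && scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == UNIT_ATTENTION)
                return -EAGAIN; /* device reported a unit attention */

        return result ? -EIO : 0;
}
#endif
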
249static void scsi_end_async(struct request *req, int uptodate) 250{ 251 struct scsi_io_context *sioc = req->end_io_data; 252 253 if (sioc->done) 254 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len); 255 256 kmem_cache_free(scsi_io_context_cache, sioc); 257 __blk_put_request(req->q, req); 258} 259 260static int scsi_merge_bio(struct request *rq, struct bio *bio) 261{ 262 struct request_queue *q = rq->q; 263 264 bio->bi_flags &= ~(1 << BIO_SEG_VALID); 265 if (rq_data_dir(rq) == WRITE) 266 bio->bi_rw |= (1 << BIO_RW); 267 blk_queue_bounce(q, &bio); 268 269 return blk_rq_append_bio(q, rq, bio); 270} 271 272static void scsi_bi_endio(struct bio *bio, int error) 273{ 274 bio_put(bio); 275} 276 277/** 278 * scsi_req_map_sg - map a scatterlist into a request 279 * @rq: request to fill 280 * @sgl: scatterlist 281 * @nsegs: number of elements 282 * @bufflen: len of buffer 283 * @gfp: memory allocation flags 284 * 285 * scsi_req_map_sg maps a scatterlist into a request so that the 286 * request can be sent to the block layer. We do not trust the scatterlist 287 * sent to use, as some ULDs use that struct to only organize the pages. 288 */ 289static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl, 290 int nsegs, unsigned bufflen, gfp_t gfp) 291{ 292 struct request_queue *q = rq->q; 293 int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 294 unsigned int data_len = bufflen, len, bytes, off; 295 struct scatterlist *sg; 296 struct page *page; 297 struct bio *bio = NULL; 298 int i, err, nr_vecs = 0; 299 300 for_each_sg(sgl, sg, nsegs, i) { 301 page = sg_page(sg); 302 off = sg->offset; 303 len = sg->length; 304 data_len += len; 305 306 while (len > 0 && data_len > 0) { 307 /* 308 * sg sends a scatterlist that is larger than 309 * the data_len it wants transferred for certain 310 * IO sizes 311 */ 312 bytes = min_t(unsigned int, len, PAGE_SIZE - off); 313 bytes = min(bytes, data_len); 314 315 if (!bio) { 316 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); 317 nr_pages -= nr_vecs; 318 319 bio = bio_alloc(gfp, nr_vecs); 320 if (!bio) { 321 err = -ENOMEM; 322 goto free_bios; 323 } 324 bio->bi_end_io = scsi_bi_endio; 325 } 326 327 if (bio_add_pc_page(q, bio, page, bytes, off) != 328 bytes) { 329 bio_put(bio); 330 err = -EINVAL; 331 goto free_bios; 332 } 333 334 if (bio->bi_vcnt >= nr_vecs) { 335 err = scsi_merge_bio(rq, bio); 336 if (err) { 337 bio_endio(bio, 0); 338 goto free_bios; 339 } 340 bio = NULL; 341 } 342 343 page++; 344 len -= bytes; 345 data_len -=bytes; 346 off = 0; 347 } 348 } 349 350 rq->buffer = rq->data = NULL; 351 rq->data_len = bufflen; 352 return 0; 353 354free_bios: 355 while ((bio = rq->bio) != NULL) { 356 rq->bio = bio->bi_next; 357 /* 358 * call endio instead of bio_put incase it was bounced 359 */ 360 bio_endio(bio, 0); 361 } 362 363 return err; 364} 365 366/** 367 * scsi_execute_async - insert request 368 * @sdev: scsi device 369 * @cmd: scsi command 370 * @cmd_len: length of scsi cdb 371 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE 372 * @buffer: data buffer (this can be a kernel buffer or scatterlist) 373 * @bufflen: len of buffer 374 * @use_sg: if buffer is a scatterlist this is the number of elements 375 * @timeout: request timeout in seconds 376 * @retries: number of times to retry request 377 * @privdata: data passed to done() 378 * @done: callback function when done 379 * @gfp: memory allocation flags 380 */ 381int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd, 382 int cmd_len, int 
data_direction, void *buffer, unsigned bufflen, 383 int use_sg, int timeout, int retries, void *privdata, 384 void (*done)(void *, char *, int, int), gfp_t gfp) 385{ 386 struct request *req; 387 struct scsi_io_context *sioc; 388 int err = 0; 389 int write = (data_direction == DMA_TO_DEVICE); 390 391 sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp); 392 if (!sioc) 393 return DRIVER_ERROR << 24; 394 395 req = blk_get_request(sdev->request_queue, write, gfp); 396 if (!req) 397 goto free_sense; 398 req->cmd_type = REQ_TYPE_BLOCK_PC; 399 req->cmd_flags |= REQ_QUIET; 400 401 if (use_sg) 402 err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp); 403 else if (bufflen) 404 err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp); 405 406 if (err) 407 goto free_req; 408 409 req->cmd_len = cmd_len; 410 memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ 411 memcpy(req->cmd, cmd, req->cmd_len); 412 req->sense = sioc->sense; 413 req->sense_len = 0; 414 req->timeout = timeout; 415 req->retries = retries; 416 req->end_io_data = sioc; 417 418 sioc->data = privdata; 419 sioc->done = done; 420 421 blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async); 422 return 0; 423 424free_req: 425 blk_put_request(req); 426free_sense: 427 kmem_cache_free(scsi_io_context_cache, sioc); 428 return DRIVER_ERROR << 24; 429} 430EXPORT_SYMBOL_GPL(scsi_execute_async); 431 432/* 433 * Function: scsi_init_cmd_errh() 434 * 435 * Purpose: Initialize cmd fields related to error handling. 436 * 437 * Arguments: cmd - command that is ready to be queued. 438 * 439 * Notes: This function has the job of initializing a number of 440 * fields related to error handling. Typically this will 441 * be called once for each command, as required. 442 */ 443static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) 444{ 445 cmd->serial_number = 0; 446 scsi_set_resid(cmd, 0); 447 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 448 if (cmd->cmd_len == 0) 449 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 450} 451 452void scsi_device_unbusy(struct scsi_device *sdev) 453{ 454 struct Scsi_Host *shost = sdev->host; 455 unsigned long flags; 456 457 spin_lock_irqsave(shost->host_lock, flags); 458 shost->host_busy--; 459 if (unlikely(scsi_host_in_recovery(shost) && 460 (shost->host_failed || shost->host_eh_scheduled))) 461 scsi_eh_wakeup(shost); 462 spin_unlock(shost->host_lock); 463 spin_lock(sdev->request_queue->queue_lock); 464 sdev->device_busy--; 465 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 466} 467 468/* 469 * Called for single_lun devices on IO completion. Clear starget_sdev_user, 470 * and call blk_run_queue for all the scsi_devices on the target - 471 * including current_sdev first. 472 * 473 * Called with *no* scsi locks held. 474 */ 475static void scsi_single_lun_run(struct scsi_device *current_sdev) 476{ 477 struct Scsi_Host *shost = current_sdev->host; 478 struct scsi_device *sdev, *tmp; 479 struct scsi_target *starget = scsi_target(current_sdev); 480 unsigned long flags; 481 482 spin_lock_irqsave(shost->host_lock, flags); 483 starget->starget_sdev_user = NULL; 484 spin_unlock_irqrestore(shost->host_lock, flags); 485 486 /* 487 * Call blk_run_queue for all LUNs on the target, starting with 488 * current_sdev. We race with others (to set starget_sdev_user), 489 * but in most cases, we will be first. Ideally, each LU on the 490 * target would get some limited time or requests on the target. 
491 */ 492 blk_run_queue(current_sdev->request_queue); 493 494 spin_lock_irqsave(shost->host_lock, flags); 495 if (starget->starget_sdev_user) 496 goto out; 497 list_for_each_entry_safe(sdev, tmp, &starget->devices, 498 same_target_siblings) { 499 if (sdev == current_sdev) 500 continue; 501 if (scsi_device_get(sdev)) 502 continue; 503 504 spin_unlock_irqrestore(shost->host_lock, flags); 505 blk_run_queue(sdev->request_queue); 506 spin_lock_irqsave(shost->host_lock, flags); 507 508 scsi_device_put(sdev); 509 } 510 out: 511 spin_unlock_irqrestore(shost->host_lock, flags); 512} 513 514/* 515 * Function: scsi_run_queue() 516 * 517 * Purpose: Select a proper request queue to serve next 518 * 519 * Arguments: q - last request's queue 520 * 521 * Returns: Nothing 522 * 523 * Notes: The previous command was completely finished, start 524 * a new one if possible. 525 */ 526static void scsi_run_queue(struct request_queue *q) 527{ 528 struct scsi_device *sdev = q->queuedata; 529 struct Scsi_Host *shost = sdev->host; 530 unsigned long flags; 531 532 if (scsi_target(sdev)->single_lun) 533 scsi_single_lun_run(sdev); 534 535 spin_lock_irqsave(shost->host_lock, flags); 536 while (!list_empty(&shost->starved_list) && 537 !shost->host_blocked && !shost->host_self_blocked && 538 !((shost->can_queue > 0) && 539 (shost->host_busy >= shost->can_queue))) { 540 /* 541 * As long as shost is accepting commands and we have 542 * starved queues, call blk_run_queue. scsi_request_fn 543 * drops the queue_lock and can add us back to the 544 * starved_list. 545 * 546 * host_lock protects the starved_list and starved_entry. 547 * scsi_request_fn must get the host_lock before checking 548 * or modifying starved_list or starved_entry. 549 */ 550 sdev = list_entry(shost->starved_list.next, 551 struct scsi_device, starved_entry); 552 list_del_init(&sdev->starved_entry); 553 spin_unlock_irqrestore(shost->host_lock, flags); 554 555 556 if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && 557 !test_and_set_bit(QUEUE_FLAG_REENTER, 558 &sdev->request_queue->queue_flags)) { 559 blk_run_queue(sdev->request_queue); 560 clear_bit(QUEUE_FLAG_REENTER, 561 &sdev->request_queue->queue_flags); 562 } else 563 blk_run_queue(sdev->request_queue); 564 565 spin_lock_irqsave(shost->host_lock, flags); 566 if (unlikely(!list_empty(&sdev->starved_entry))) 567 /* 568 * sdev lost a race, and was put back on the 569 * starved list. This is unlikely but without this 570 * in theory we could loop forever. 571 */ 572 break; 573 } 574 spin_unlock_irqrestore(shost->host_lock, flags); 575 576 blk_run_queue(q); 577} 578 579/* 580 * Function: scsi_requeue_command() 581 * 582 * Purpose: Handle post-processing of completed commands. 583 * 584 * Arguments: q - queue to operate on 585 * cmd - command that may need to be requeued. 586 * 587 * Returns: Nothing 588 * 589 * Notes: After command completion, there may be blocks left 590 * over which weren't finished by the previous command 591 * this can be for a number of reasons - the main one is 592 * I/O errors in the middle of the request, in which case 593 * we need to request the blocks that come after the bad 594 * sector. 595 * Notes: Upon return, cmd is a stale pointer. 
596 */ 597static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) 598{ 599 struct request *req = cmd->request; 600 unsigned long flags; 601 602 scsi_unprep_request(req); 603 spin_lock_irqsave(q->queue_lock, flags); 604 blk_requeue_request(q, req); 605 spin_unlock_irqrestore(q->queue_lock, flags); 606 607 scsi_run_queue(q); 608} 609 610void scsi_next_command(struct scsi_cmnd *cmd) 611{ 612 struct scsi_device *sdev = cmd->device; 613 struct request_queue *q = sdev->request_queue; 614 615 /* need to hold a reference on the device before we let go of the cmd */ 616 get_device(&sdev->sdev_gendev); 617 618 scsi_put_command(cmd); 619 scsi_run_queue(q); 620 621 /* ok to remove device now */ 622 put_device(&sdev->sdev_gendev); 623} 624 625void scsi_run_host_queues(struct Scsi_Host *shost) 626{ 627 struct scsi_device *sdev; 628 629 shost_for_each_device(sdev, shost) 630 scsi_run_queue(sdev->request_queue); 631} 632 633/* 634 * Function: scsi_end_request() 635 * 636 * Purpose: Post-processing of completed commands (usually invoked at end 637 * of upper level post-processing and scsi_io_completion). 638 * 639 * Arguments: cmd - command that is complete. 640 * error - 0 if I/O indicates success, < 0 for I/O error. 641 * bytes - number of bytes of completed I/O 642 * requeue - indicates whether we should requeue leftovers. 643 * 644 * Lock status: Assumed that lock is not held upon entry. 645 * 646 * Returns: cmd if requeue required, NULL otherwise. 647 * 648 * Notes: This is called for block device requests in order to 649 * mark some number of sectors as complete. 650 * 651 * We are guaranteeing that the request queue will be goosed 652 * at some point during this call. 653 * Notes: If cmd was requeued, upon return it will be a stale pointer. 654 */ 655static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, 656 int bytes, int requeue) 657{ 658 struct request_queue *q = cmd->device->request_queue; 659 struct request *req = cmd->request; 660 661 /* 662 * If there are blocks left over at the end, set up the command 663 * to queue the remainder of them. 664 */ 665 if (blk_end_request(req, error, bytes)) { 666 int leftover = (req->hard_nr_sectors << 9); 667 668 if (blk_pc_request(req)) 669 leftover = req->data_len; 670 671 /* kill remainder if no retrys */ 672 if (error && blk_noretry_request(req)) 673 blk_end_request(req, error, leftover); 674 else { 675 if (requeue) { 676 /* 677 * Bleah. Leftovers again. Stick the 678 * leftovers in the front of the 679 * queue, and goose the queue again. 680 */ 681 scsi_requeue_command(q, cmd); 682 cmd = NULL; 683 } 684 return cmd; 685 } 686 } 687 688 /* 689 * This will goose the queue request function at the end, so we don't 690 * need to worry about launching another command. 
691 */ 692 scsi_next_command(cmd); 693 return NULL; 694} 695 696static inline unsigned int scsi_sgtable_index(unsigned short nents) 697{ 698 unsigned int index; 699 700 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS); 701 702 if (nents <= 8) 703 index = 0; 704 else 705 index = get_count_order(nents) - 3; 706 707 return index; 708} 709 710static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents) 711{ 712 struct scsi_host_sg_pool *sgp; 713 714 sgp = scsi_sg_pools + scsi_sgtable_index(nents); 715 mempool_free(sgl, sgp->pool); 716} 717 718static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) 719{ 720 struct scsi_host_sg_pool *sgp; 721 722 sgp = scsi_sg_pools + scsi_sgtable_index(nents); 723 return mempool_alloc(sgp->pool, gfp_mask); 724} 725 726static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, 727 gfp_t gfp_mask) 728{ 729 int ret; 730 731 BUG_ON(!nents); 732 733 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, 734 gfp_mask, scsi_sg_alloc); 735 if (unlikely(ret)) 736 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, 737 scsi_sg_free); 738 739 return ret; 740} 741 742static void scsi_free_sgtable(struct scsi_data_buffer *sdb) 743{ 744 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); 745} 746 747/* 748 * Function: scsi_release_buffers() 749 * 750 * Purpose: Completion processing for block device I/O requests. 751 * 752 * Arguments: cmd - command that we are bailing. 753 * 754 * Lock status: Assumed that no lock is held upon entry. 755 * 756 * Returns: Nothing 757 * 758 * Notes: In the event that an upper level driver rejects a 759 * command, we must release resources allocated during 760 * the __init_io() function. Primarily this would involve 761 * the scatter-gather table, and potentially any bounce 762 * buffers. 763 */ 764void scsi_release_buffers(struct scsi_cmnd *cmd) 765{ 766 if (cmd->sdb.table.nents) 767 scsi_free_sgtable(&cmd->sdb); 768 769 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 770 771 if (scsi_bidi_cmnd(cmd)) { 772 struct scsi_data_buffer *bidi_sdb = 773 cmd->request->next_rq->special; 774 scsi_free_sgtable(bidi_sdb); 775 kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb); 776 cmd->request->next_rq->special = NULL; 777 } 778} 779EXPORT_SYMBOL(scsi_release_buffers); 780 781/* 782 * Bidi commands Must be complete as a whole, both sides at once. 783 * If part of the bytes were written and lld returned 784 * scsi_in()->resid and/or scsi_out()->resid this information will be left 785 * in req->data_len and req->next_rq->data_len. The upper-layer driver can 786 * decide what to do with this information. 787 */ 788void scsi_end_bidi_request(struct scsi_cmnd *cmd) 789{ 790 struct request *req = cmd->request; 791 unsigned int dlen = req->data_len; 792 unsigned int next_dlen = req->next_rq->data_len; 793 794 req->data_len = scsi_out(cmd)->resid; 795 req->next_rq->data_len = scsi_in(cmd)->resid; 796 797 /* The req and req->next_rq have not been completed */ 798 BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen)); 799 800 scsi_release_buffers(cmd); 801 802 /* 803 * This will goose the queue request function at the end, so we don't 804 * need to worry about launching another command. 805 */ 806 scsi_next_command(cmd); 807} 808 809/* 810 * Function: scsi_io_completion() 811 * 812 * Purpose: Completion processing for block device I/O requests. 813 * 814 * Arguments: cmd - command that is finished. 815 * 816 * Lock status: Assumed that no lock is held upon entry. 
817 * 818 * Returns: Nothing 819 * 820 * Notes: This function is matched in terms of capabilities to 821 * the function that created the scatter-gather list. 822 * In other words, if there are no bounce buffers 823 * (the normal case for most drivers), we don't need 824 * the logic to deal with cleaning up afterwards. 825 * 826 * We must do one of several things here: 827 * 828 * a) Call scsi_end_request. This will finish off the 829 * specified number of sectors. If we are done, the 830 * command block will be released, and the queue 831 * function will be goosed. If we are not done, then 832 * scsi_end_request will directly goose the queue. 833 * 834 * b) We can just use scsi_requeue_command() here. This would 835 * be used if we just wanted to retry, for example. 836 */ 837void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 838{ 839 int result = cmd->result; 840 int this_count = scsi_bufflen(cmd); 841 struct request_queue *q = cmd->device->request_queue; 842 struct request *req = cmd->request; 843 int clear_errors = 1; 844 struct scsi_sense_hdr sshdr; 845 int sense_valid = 0; 846 int sense_deferred = 0; 847 848 if (result) { 849 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 850 if (sense_valid) 851 sense_deferred = scsi_sense_is_deferred(&sshdr); 852 } 853 854 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 855 req->errors = result; 856 if (result) { 857 clear_errors = 0; 858 if (sense_valid && req->sense) { 859 /* 860 * SG_IO wants current and deferred errors 861 */ 862 int len = 8 + cmd->sense_buffer[7]; 863 864 if (len > SCSI_SENSE_BUFFERSIZE) 865 len = SCSI_SENSE_BUFFERSIZE; 866 memcpy(req->sense, cmd->sense_buffer, len); 867 req->sense_len = len; 868 } 869 } 870 if (scsi_bidi_cmnd(cmd)) { 871 /* will also release_buffers */ 872 scsi_end_bidi_request(cmd); 873 return; 874 } 875 req->data_len = scsi_get_resid(cmd); 876 } 877 878 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 879 scsi_release_buffers(cmd); 880 881 /* 882 * Next deal with any sectors which we were able to correctly 883 * handle. 884 */ 885 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 886 "%d bytes done.\n", 887 req->nr_sectors, good_bytes)); 888 889 if (clear_errors) 890 req->errors = 0; 891 892 /* A number of bytes were successfully read. If there 893 * are leftovers and there is some kind of error 894 * (result != 0), retry the rest. 895 */ 896 if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL) 897 return; 898 899 /* good_bytes = 0, or (inclusive) there were leftovers and 900 * result = 0, so scsi_end_request couldn't retry. 901 */ 902 if (sense_valid && !sense_deferred) { 903 switch (sshdr.sense_key) { 904 case UNIT_ATTENTION: 905 if (cmd->device->removable) { 906 /* Detected disc change. Set a bit 907 * and quietly refuse further access. 908 */ 909 cmd->device->changed = 1; 910 scsi_end_request(cmd, -EIO, this_count, 1); 911 return; 912 } else { 913 /* Must have been a power glitch, or a 914 * bus reset. Could not have been a 915 * media change, so we just retry the 916 * request and see what happens. 917 */ 918 scsi_requeue_command(q, cmd); 919 return; 920 } 921 break; 922 case ILLEGAL_REQUEST: 923 /* If we had an ILLEGAL REQUEST returned, then 924 * we may have performed an unsupported 925 * command. The only thing this should be 926 * would be a ten byte read where only a six 927 * byte read was supported. Also, on a system 928 * where READ CAPACITY failed, we may have 929 * read past the end of the disk. 
930 */ 931 if ((cmd->device->use_10_for_rw && 932 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 933 (cmd->cmnd[0] == READ_10 || 934 cmd->cmnd[0] == WRITE_10)) { 935 cmd->device->use_10_for_rw = 0; 936 /* This will cause a retry with a 937 * 6-byte command. 938 */ 939 scsi_requeue_command(q, cmd); 940 return; 941 } else { 942 scsi_end_request(cmd, -EIO, this_count, 1); 943 return; 944 } 945 break; 946 case NOT_READY: 947 /* If the device is in the process of becoming 948 * ready, or has a temporary blockage, retry. 949 */ 950 if (sshdr.asc == 0x04) { 951 switch (sshdr.ascq) { 952 case 0x01: /* becoming ready */ 953 case 0x04: /* format in progress */ 954 case 0x05: /* rebuild in progress */ 955 case 0x06: /* recalculation in progress */ 956 case 0x07: /* operation in progress */ 957 case 0x08: /* Long write in progress */ 958 case 0x09: /* self test in progress */ 959 scsi_requeue_command(q, cmd); 960 return; 961 default: 962 break; 963 } 964 } 965 if (!(req->cmd_flags & REQ_QUIET)) 966 scsi_cmd_print_sense_hdr(cmd, 967 "Device not ready", 968 &sshdr); 969 970 scsi_end_request(cmd, -EIO, this_count, 1); 971 return; 972 case VOLUME_OVERFLOW: 973 if (!(req->cmd_flags & REQ_QUIET)) { 974 scmd_printk(KERN_INFO, cmd, 975 "Volume overflow, CDB: "); 976 __scsi_print_command(cmd->cmnd); 977 scsi_print_sense("", cmd); 978 } 979 /* See SSC3rXX or current. */ 980 scsi_end_request(cmd, -EIO, this_count, 1); 981 return; 982 default: 983 break; 984 } 985 } 986 if (host_byte(result) == DID_RESET) { 987 /* Third party bus reset or reset for error recovery 988 * reasons. Just retry the request and see what 989 * happens. 990 */ 991 scsi_requeue_command(q, cmd); 992 return; 993 } 994 if (result) { 995 if (!(req->cmd_flags & REQ_QUIET)) { 996 scsi_print_result(cmd); 997 if (driver_byte(result) & DRIVER_SENSE) 998 scsi_print_sense("", cmd); 999 } 1000 } 1001 scsi_end_request(cmd, -EIO, this_count, !result); 1002} 1003 1004static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, 1005 gfp_t gfp_mask) 1006{ 1007 int count; 1008 1009 /* 1010 * If sg table allocation fails, requeue request later. 1011 */ 1012 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, 1013 gfp_mask))) { 1014 return BLKPREP_DEFER; 1015 } 1016 1017 req->buffer = NULL; 1018 if (blk_pc_request(req)) 1019 sdb->length = req->data_len; 1020 else 1021 sdb->length = req->nr_sectors << 9; 1022 1023 /* 1024 * Next, walk the list, and fill in the addresses and sizes of 1025 * each segment. 1026 */ 1027 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 1028 BUG_ON(count > sdb->table.nents); 1029 sdb->table.nents = count; 1030 return BLKPREP_OK; 1031} 1032 1033/* 1034 * Function: scsi_init_io() 1035 * 1036 * Purpose: SCSI I/O initialize function. 
1037 * 1038 * Arguments: cmd - Command descriptor we wish to initialize 1039 * 1040 * Returns: 0 on success 1041 * BLKPREP_DEFER if the failure is retryable 1042 * BLKPREP_KILL if the failure is fatal 1043 */ 1044int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) 1045{ 1046 int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask); 1047 if (error) 1048 goto err_exit; 1049 1050 if (blk_bidi_rq(cmd->request)) { 1051 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( 1052 scsi_bidi_sdb_cache, GFP_ATOMIC); 1053 if (!bidi_sdb) { 1054 error = BLKPREP_DEFER; 1055 goto err_exit; 1056 } 1057 1058 cmd->request->next_rq->special = bidi_sdb; 1059 error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb, 1060 GFP_ATOMIC); 1061 if (error) 1062 goto err_exit; 1063 } 1064 1065 return BLKPREP_OK ; 1066 1067err_exit: 1068 scsi_release_buffers(cmd); 1069 if (error == BLKPREP_KILL) 1070 scsi_put_command(cmd); 1071 else /* BLKPREP_DEFER */ 1072 scsi_unprep_request(cmd->request); 1073 1074 return error; 1075} 1076EXPORT_SYMBOL(scsi_init_io); 1077 1078static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, 1079 struct request *req) 1080{ 1081 struct scsi_cmnd *cmd; 1082 1083 if (!req->special) { 1084 cmd = scsi_get_command(sdev, GFP_ATOMIC); 1085 if (unlikely(!cmd)) 1086 return NULL; 1087 req->special = cmd; 1088 } else { 1089 cmd = req->special; 1090 } 1091 1092 /* pull a tag out of the request if we have one */ 1093 cmd->tag = req->tag; 1094 cmd->request = req; 1095 1096 return cmd; 1097} 1098 1099int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) 1100{ 1101 struct scsi_cmnd *cmd; 1102 int ret = scsi_prep_state_check(sdev, req); 1103 1104 if (ret != BLKPREP_OK) 1105 return ret; 1106 1107 cmd = scsi_get_cmd_from_req(sdev, req); 1108 if (unlikely(!cmd)) 1109 return BLKPREP_DEFER; 1110 1111 /* 1112 * BLOCK_PC requests may transfer data, in which case they must 1113 * a bio attached to them. Or they might contain a SCSI command 1114 * that does not transfer data, in which case they may optionally 1115 * submit a request without an attached bio. 1116 */ 1117 if (req->bio) { 1118 int ret; 1119 1120 BUG_ON(!req->nr_phys_segments); 1121 1122 ret = scsi_init_io(cmd, GFP_ATOMIC); 1123 if (unlikely(ret)) 1124 return ret; 1125 } else { 1126 BUG_ON(req->data_len); 1127 BUG_ON(req->data); 1128 1129 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1130 req->buffer = NULL; 1131 } 1132 1133 BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd)); 1134 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); 1135 cmd->cmd_len = req->cmd_len; 1136 if (!req->data_len) 1137 cmd->sc_data_direction = DMA_NONE; 1138 else if (rq_data_dir(req) == WRITE) 1139 cmd->sc_data_direction = DMA_TO_DEVICE; 1140 else 1141 cmd->sc_data_direction = DMA_FROM_DEVICE; 1142 1143 cmd->transfersize = req->data_len; 1144 cmd->allowed = req->retries; 1145 cmd->timeout_per_command = req->timeout; 1146 return BLKPREP_OK; 1147} 1148EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); 1149 1150/* 1151 * Setup a REQ_TYPE_FS command. These are simple read/write request 1152 * from filesystems that still need to be translated to SCSI CDBs from 1153 * the ULD. 1154 */ 1155int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) 1156{ 1157 struct scsi_cmnd *cmd; 1158 int ret = scsi_prep_state_check(sdev, req); 1159 1160 if (ret != BLKPREP_OK) 1161 return ret; 1162 /* 1163 * Filesystem requests must transfer data. 
1164 */ 1165 BUG_ON(!req->nr_phys_segments); 1166 1167 cmd = scsi_get_cmd_from_req(sdev, req); 1168 if (unlikely(!cmd)) 1169 return BLKPREP_DEFER; 1170 1171 return scsi_init_io(cmd, GFP_ATOMIC); 1172} 1173EXPORT_SYMBOL(scsi_setup_fs_cmnd); 1174 1175int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) 1176{ 1177 int ret = BLKPREP_OK; 1178 1179 /* 1180 * If the device is not in running state we will reject some 1181 * or all commands. 1182 */ 1183 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { 1184 switch (sdev->sdev_state) { 1185 case SDEV_OFFLINE: 1186 /* 1187 * If the device is offline we refuse to process any 1188 * commands. The device must be brought online 1189 * before trying any recovery commands. 1190 */ 1191 sdev_printk(KERN_ERR, sdev, 1192 "rejecting I/O to offline device\n"); 1193 ret = BLKPREP_KILL; 1194 break; 1195 case SDEV_DEL: 1196 /* 1197 * If the device is fully deleted, we refuse to 1198 * process any commands as well. 1199 */ 1200 sdev_printk(KERN_ERR, sdev, 1201 "rejecting I/O to dead device\n"); 1202 ret = BLKPREP_KILL; 1203 break; 1204 case SDEV_QUIESCE: 1205 case SDEV_BLOCK: 1206 /* 1207 * If the devices is blocked we defer normal commands. 1208 */ 1209 if (!(req->cmd_flags & REQ_PREEMPT)) 1210 ret = BLKPREP_DEFER; 1211 break; 1212 default: 1213 /* 1214 * For any other not fully online state we only allow 1215 * special commands. In particular any user initiated 1216 * command is not allowed. 1217 */ 1218 if (!(req->cmd_flags & REQ_PREEMPT)) 1219 ret = BLKPREP_KILL; 1220 break; 1221 } 1222 } 1223 return ret; 1224} 1225EXPORT_SYMBOL(scsi_prep_state_check); 1226 1227int scsi_prep_return(struct request_queue *q, struct request *req, int ret) 1228{ 1229 struct scsi_device *sdev = q->queuedata; 1230 1231 switch (ret) { 1232 case BLKPREP_KILL: 1233 req->errors = DID_NO_CONNECT << 16; 1234 /* release the command and kill it */ 1235 if (req->special) { 1236 struct scsi_cmnd *cmd = req->special; 1237 scsi_release_buffers(cmd); 1238 scsi_put_command(cmd); 1239 req->special = NULL; 1240 } 1241 break; 1242 case BLKPREP_DEFER: 1243 /* 1244 * If we defer, the elv_next_request() returns NULL, but the 1245 * queue must be restarted, so we plug here if no returning 1246 * command will automatically do that. 1247 */ 1248 if (sdev->device_busy == 0) 1249 blk_plug_device(q); 1250 break; 1251 default: 1252 req->cmd_flags |= REQ_DONTPREP; 1253 } 1254 1255 return ret; 1256} 1257EXPORT_SYMBOL(scsi_prep_return); 1258 1259int scsi_prep_fn(struct request_queue *q, struct request *req) 1260{ 1261 struct scsi_device *sdev = q->queuedata; 1262 int ret = BLKPREP_KILL; 1263 1264 if (req->cmd_type == REQ_TYPE_BLOCK_PC) 1265 ret = scsi_setup_blk_pc_cmnd(sdev, req); 1266 return scsi_prep_return(q, req, ret); 1267} 1268 1269/* 1270 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else 1271 * return 0. 1272 * 1273 * Called with the queue_lock held. 
1274 */ 1275static inline int scsi_dev_queue_ready(struct request_queue *q, 1276 struct scsi_device *sdev) 1277{ 1278 if (sdev->device_busy >= sdev->queue_depth) 1279 return 0; 1280 if (sdev->device_busy == 0 && sdev->device_blocked) { 1281 /* 1282 * unblock after device_blocked iterates to zero 1283 */ 1284 if (--sdev->device_blocked == 0) { 1285 SCSI_LOG_MLQUEUE(3, 1286 sdev_printk(KERN_INFO, sdev, 1287 "unblocking device at zero depth\n")); 1288 } else { 1289 blk_plug_device(q); 1290 return 0; 1291 } 1292 } 1293 if (sdev->device_blocked) 1294 return 0; 1295 1296 return 1; 1297} 1298 1299/* 1300 * scsi_host_queue_ready: if we can send requests to shost, return 1 else 1301 * return 0. We must end up running the queue again whenever 0 is 1302 * returned, else IO can hang. 1303 * 1304 * Called with host_lock held. 1305 */ 1306static inline int scsi_host_queue_ready(struct request_queue *q, 1307 struct Scsi_Host *shost, 1308 struct scsi_device *sdev) 1309{ 1310 if (scsi_host_in_recovery(shost)) 1311 return 0; 1312 if (shost->host_busy == 0 && shost->host_blocked) { 1313 /* 1314 * unblock after host_blocked iterates to zero 1315 */ 1316 if (--shost->host_blocked == 0) { 1317 SCSI_LOG_MLQUEUE(3, 1318 printk("scsi%d unblocking host at zero depth\n", 1319 shost->host_no)); 1320 } else { 1321 blk_plug_device(q); 1322 return 0; 1323 } 1324 } 1325 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) || 1326 shost->host_blocked || shost->host_self_blocked) { 1327 if (list_empty(&sdev->starved_entry)) 1328 list_add_tail(&sdev->starved_entry, &shost->starved_list); 1329 return 0; 1330 } 1331 1332 /* We're OK to process the command, so we can't be starved */ 1333 if (!list_empty(&sdev->starved_entry)) 1334 list_del_init(&sdev->starved_entry); 1335 1336 return 1; 1337} 1338 1339/* 1340 * Kill a request for a dead device 1341 */ 1342static void scsi_kill_request(struct request *req, struct request_queue *q) 1343{ 1344 struct scsi_cmnd *cmd = req->special; 1345 struct scsi_device *sdev = cmd->device; 1346 struct Scsi_Host *shost = sdev->host; 1347 1348 blkdev_dequeue_request(req); 1349 1350 if (unlikely(cmd == NULL)) { 1351 printk(KERN_CRIT "impossible request in %s.\n", 1352 __FUNCTION__); 1353 BUG(); 1354 } 1355 1356 scsi_init_cmd_errh(cmd); 1357 cmd->result = DID_NO_CONNECT << 16; 1358 atomic_inc(&cmd->device->iorequest_cnt); 1359 1360 /* 1361 * SCSI request completion path will do scsi_device_unbusy(), 1362 * bump busy counts. To bump the counters, we need to dance 1363 * with the locks as normal issue path does. 
1364 */ 1365 sdev->device_busy++; 1366 spin_unlock(sdev->request_queue->queue_lock); 1367 spin_lock(shost->host_lock); 1368 shost->host_busy++; 1369 spin_unlock(shost->host_lock); 1370 spin_lock(sdev->request_queue->queue_lock); 1371 1372 __scsi_done(cmd); 1373} 1374 1375static void scsi_softirq_done(struct request *rq) 1376{ 1377 struct scsi_cmnd *cmd = rq->completion_data; 1378 unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command; 1379 int disposition; 1380 1381 INIT_LIST_HEAD(&cmd->eh_entry); 1382 1383 disposition = scsi_decide_disposition(cmd); 1384 if (disposition != SUCCESS && 1385 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1386 sdev_printk(KERN_ERR, cmd->device, 1387 "timing out command, waited %lus\n", 1388 wait_for/HZ); 1389 disposition = SUCCESS; 1390 } 1391 1392 scsi_log_completion(cmd, disposition); 1393 1394 switch (disposition) { 1395 case SUCCESS: 1396 scsi_finish_command(cmd); 1397 break; 1398 case NEEDS_RETRY: 1399 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); 1400 break; 1401 case ADD_TO_MLQUEUE: 1402 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 1403 break; 1404 default: 1405 if (!scsi_eh_scmd_add(cmd, 0)) 1406 scsi_finish_command(cmd); 1407 } 1408} 1409 1410/* 1411 * Function: scsi_request_fn() 1412 * 1413 * Purpose: Main strategy routine for SCSI. 1414 * 1415 * Arguments: q - Pointer to actual queue. 1416 * 1417 * Returns: Nothing 1418 * 1419 * Lock status: IO request lock assumed to be held when called. 1420 */ 1421static void scsi_request_fn(struct request_queue *q) 1422{ 1423 struct scsi_device *sdev = q->queuedata; 1424 struct Scsi_Host *shost; 1425 struct scsi_cmnd *cmd; 1426 struct request *req; 1427 1428 if (!sdev) { 1429 printk("scsi: killing requests for dead queue\n"); 1430 while ((req = elv_next_request(q)) != NULL) 1431 scsi_kill_request(req, q); 1432 return; 1433 } 1434 1435 if(!get_device(&sdev->sdev_gendev)) 1436 /* We must be tearing the block queue down already */ 1437 return; 1438 1439 /* 1440 * To start with, we keep looping until the queue is empty, or until 1441 * the host is no longer able to accept any more requests. 1442 */ 1443 shost = sdev->host; 1444 while (!blk_queue_plugged(q)) { 1445 int rtn; 1446 /* 1447 * get next queueable request. We do this early to make sure 1448 * that the request is fully prepared even if we cannot 1449 * accept it. 1450 */ 1451 req = elv_next_request(q); 1452 if (!req || !scsi_dev_queue_ready(q, sdev)) 1453 break; 1454 1455 if (unlikely(!scsi_device_online(sdev))) { 1456 sdev_printk(KERN_ERR, sdev, 1457 "rejecting I/O to offline device\n"); 1458 scsi_kill_request(req, q); 1459 continue; 1460 } 1461 1462 1463 /* 1464 * Remove the request from the request list. 
1465 */ 1466 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1467 blkdev_dequeue_request(req); 1468 sdev->device_busy++; 1469 1470 spin_unlock(q->queue_lock); 1471 cmd = req->special; 1472 if (unlikely(cmd == NULL)) { 1473 printk(KERN_CRIT "impossible request in %s.\n" 1474 "please mail a stack trace to " 1475 "linux-scsi@vger.kernel.org\n", 1476 __FUNCTION__); 1477 blk_dump_rq_flags(req, "foo"); 1478 BUG(); 1479 } 1480 spin_lock(shost->host_lock); 1481 1482 if (!scsi_host_queue_ready(q, shost, sdev)) 1483 goto not_ready; 1484 if (scsi_target(sdev)->single_lun) { 1485 if (scsi_target(sdev)->starget_sdev_user && 1486 scsi_target(sdev)->starget_sdev_user != sdev) 1487 goto not_ready; 1488 scsi_target(sdev)->starget_sdev_user = sdev; 1489 } 1490 shost->host_busy++; 1491 1492 /* 1493 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will 1494 * take the lock again. 1495 */ 1496 spin_unlock_irq(shost->host_lock); 1497 1498 /* 1499 * Finally, initialize any error handling parameters, and set up 1500 * the timers for timeouts. 1501 */ 1502 scsi_init_cmd_errh(cmd); 1503 1504 /* 1505 * Dispatch the command to the low-level driver. 1506 */ 1507 rtn = scsi_dispatch_cmd(cmd); 1508 spin_lock_irq(q->queue_lock); 1509 if(rtn) { 1510 /* we're refusing the command; because of 1511 * the way locks get dropped, we need to 1512 * check here if plugging is required */ 1513 if(sdev->device_busy == 0) 1514 blk_plug_device(q); 1515 1516 break; 1517 } 1518 } 1519 1520 goto out; 1521 1522 not_ready: 1523 spin_unlock_irq(shost->host_lock); 1524 1525 /* 1526 * lock q, handle tag, requeue req, and decrement device_busy. We 1527 * must return with queue_lock held. 1528 * 1529 * Decrementing device_busy without checking it is OK, as all such 1530 * cases (host limits or settings) should run the queue at some 1531 * later time. 1532 */ 1533 spin_lock_irq(q->queue_lock); 1534 blk_requeue_request(q, req); 1535 sdev->device_busy--; 1536 if(sdev->device_busy == 0) 1537 blk_plug_device(q); 1538 out: 1539 /* must be careful here...if we trigger the ->remove() function 1540 * we cannot be holding the q lock */ 1541 spin_unlock_irq(q->queue_lock); 1542 put_device(&sdev->sdev_gendev); 1543 spin_lock_irq(q->queue_lock); 1544} 1545 1546u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 1547{ 1548 struct device *host_dev; 1549 u64 bounce_limit = 0xffffffff; 1550 1551 if (shost->unchecked_isa_dma) 1552 return BLK_BOUNCE_ISA; 1553 /* 1554 * Platforms with virtual-DMA translation 1555 * hardware have no practical limit. 
1556 */ 1557 if (!PCI_DMA_BUS_IS_PHYS) 1558 return BLK_BOUNCE_ANY; 1559 1560 host_dev = scsi_get_device(shost); 1561 if (host_dev && host_dev->dma_mask) 1562 bounce_limit = *host_dev->dma_mask; 1563 1564 return bounce_limit; 1565} 1566EXPORT_SYMBOL(scsi_calculate_bounce_limit); 1567 1568struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 1569 request_fn_proc *request_fn) 1570{ 1571 struct request_queue *q; 1572 struct device *dev = shost->shost_gendev.parent; 1573 1574 q = blk_init_queue(request_fn, NULL); 1575 if (!q) 1576 return NULL; 1577 1578 /* 1579 * this limit is imposed by hardware restrictions 1580 */ 1581 blk_queue_max_hw_segments(q, shost->sg_tablesize); 1582 blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); 1583 1584 blk_queue_max_sectors(q, shost->max_sectors); 1585 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1586 blk_queue_segment_boundary(q, shost->dma_boundary); 1587 dma_set_seg_boundary(dev, shost->dma_boundary); 1588 1589 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1590 1591 if (!shost->use_clustering) 1592 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 1593 1594 /* 1595 * set a reasonable default alignment on word boundaries: the 1596 * host and device may alter it using 1597 * blk_queue_update_dma_alignment() later. 1598 */ 1599 blk_queue_dma_alignment(q, 0x03); 1600 1601 return q; 1602} 1603EXPORT_SYMBOL(__scsi_alloc_queue); 1604 1605struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 1606{ 1607 struct request_queue *q; 1608 1609 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 1610 if (!q) 1611 return NULL; 1612 1613 blk_queue_prep_rq(q, scsi_prep_fn); 1614 blk_queue_softirq_done(q, scsi_softirq_done); 1615 return q; 1616} 1617 1618void scsi_free_queue(struct request_queue *q) 1619{ 1620 blk_cleanup_queue(q); 1621} 1622 1623/* 1624 * Function: scsi_block_requests() 1625 * 1626 * Purpose: Utility function used by low-level drivers to prevent further 1627 * commands from being queued to the device. 1628 * 1629 * Arguments: shost - Host in question 1630 * 1631 * Returns: Nothing 1632 * 1633 * Lock status: No locks are assumed held. 1634 * 1635 * Notes: There is no timer nor any other means by which the requests 1636 * get unblocked other than the low-level driver calling 1637 * scsi_unblock_requests(). 1638 */ 1639void scsi_block_requests(struct Scsi_Host *shost) 1640{ 1641 shost->host_self_blocked = 1; 1642} 1643EXPORT_SYMBOL(scsi_block_requests); 1644 1645/* 1646 * Function: scsi_unblock_requests() 1647 * 1648 * Purpose: Utility function used by low-level drivers to allow further 1649 * commands from being queued to the device. 1650 * 1651 * Arguments: shost - Host in question 1652 * 1653 * Returns: Nothing 1654 * 1655 * Lock status: No locks are assumed held. 1656 * 1657 * Notes: There is no timer nor any other means by which the requests 1658 * get unblocked other than the low-level driver calling 1659 * scsi_unblock_requests(). 1660 * 1661 * This is done as an API function so that changes to the 1662 * internals of the scsi mid-layer won't require wholesale 1663 * changes to drivers that use this feature. 
1664 */ 1665void scsi_unblock_requests(struct Scsi_Host *shost) 1666{ 1667 shost->host_self_blocked = 0; 1668 scsi_run_host_queues(shost); 1669} 1670EXPORT_SYMBOL(scsi_unblock_requests); 1671 1672int __init scsi_init_queue(void) 1673{ 1674 int i; 1675 1676 scsi_io_context_cache = kmem_cache_create("scsi_io_context", 1677 sizeof(struct scsi_io_context), 1678 0, 0, NULL); 1679 if (!scsi_io_context_cache) { 1680 printk(KERN_ERR "SCSI: can't init scsi io context cache\n"); 1681 return -ENOMEM; 1682 } 1683 1684 scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb", 1685 sizeof(struct scsi_data_buffer), 1686 0, 0, NULL); 1687 if (!scsi_bidi_sdb_cache) { 1688 printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n"); 1689 goto cleanup_io_context; 1690 } 1691 1692 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1693 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1694 int size = sgp->size * sizeof(struct scatterlist); 1695 1696 sgp->slab = kmem_cache_create(sgp->name, size, 0, 1697 SLAB_HWCACHE_ALIGN, NULL); 1698 if (!sgp->slab) { 1699 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1700 sgp->name); 1701 goto cleanup_bidi_sdb; 1702 } 1703 1704 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 1705 sgp->slab); 1706 if (!sgp->pool) { 1707 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1708 sgp->name); 1709 goto cleanup_bidi_sdb; 1710 } 1711 } 1712 1713 return 0; 1714 1715cleanup_bidi_sdb: 1716 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1717 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1718 if (sgp->pool) 1719 mempool_destroy(sgp->pool); 1720 if (sgp->slab) 1721 kmem_cache_destroy(sgp->slab); 1722 } 1723 kmem_cache_destroy(scsi_bidi_sdb_cache); 1724cleanup_io_context: 1725 kmem_cache_destroy(scsi_io_context_cache); 1726 1727 return -ENOMEM; 1728} 1729 1730void scsi_exit_queue(void) 1731{ 1732 int i; 1733 1734 kmem_cache_destroy(scsi_io_context_cache); 1735 kmem_cache_destroy(scsi_bidi_sdb_cache); 1736 1737 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1738 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1739 mempool_destroy(sgp->pool); 1740 kmem_cache_destroy(sgp->slab); 1741 } 1742} 1743 1744/** 1745 * scsi_mode_select - issue a mode select 1746 * @sdev: SCSI device to be queried 1747 * @pf: Page format bit (1 == standard, 0 == vendor specific) 1748 * @sp: Save page bit (0 == don't save, 1 == save) 1749 * @modepage: mode page being requested 1750 * @buffer: request buffer (may not be smaller than eight bytes) 1751 * @len: length of request buffer. 1752 * @timeout: command timeout 1753 * @retries: number of retries before failing 1754 * @data: returns a structure abstracting the mode header data 1755 * @sshdr: place to put sense data (or NULL if no sense to be collected). 1756 * must be SCSI_SENSE_BUFFERSIZE big. 1757 * 1758 * Returns zero if successful; negative error number or scsi 1759 * status on error 1760 * 1761 */ 1762int 1763scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 1764 unsigned char *buffer, int len, int timeout, int retries, 1765 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 1766{ 1767 unsigned char cmd[10]; 1768 unsigned char *real_buffer; 1769 int ret; 1770 1771 memset(cmd, 0, sizeof(cmd)); 1772 cmd[1] = (pf ? 0x10 : 0) | (sp ? 
0x01 : 0); 1773 1774 if (sdev->use_10_for_ms) { 1775 if (len > 65535) 1776 return -EINVAL; 1777 real_buffer = kmalloc(8 + len, GFP_KERNEL); 1778 if (!real_buffer) 1779 return -ENOMEM; 1780 memcpy(real_buffer + 8, buffer, len); 1781 len += 8; 1782 real_buffer[0] = 0; 1783 real_buffer[1] = 0; 1784 real_buffer[2] = data->medium_type; 1785 real_buffer[3] = data->device_specific; 1786 real_buffer[4] = data->longlba ? 0x01 : 0; 1787 real_buffer[5] = 0; 1788 real_buffer[6] = data->block_descriptor_length >> 8; 1789 real_buffer[7] = data->block_descriptor_length; 1790 1791 cmd[0] = MODE_SELECT_10; 1792 cmd[7] = len >> 8; 1793 cmd[8] = len; 1794 } else { 1795 if (len > 255 || data->block_descriptor_length > 255 || 1796 data->longlba) 1797 return -EINVAL; 1798 1799 real_buffer = kmalloc(4 + len, GFP_KERNEL); 1800 if (!real_buffer) 1801 return -ENOMEM; 1802 memcpy(real_buffer + 4, buffer, len); 1803 len += 4; 1804 real_buffer[0] = 0; 1805 real_buffer[1] = data->medium_type; 1806 real_buffer[2] = data->device_specific; 1807 real_buffer[3] = data->block_descriptor_length; 1808 1809 1810 cmd[0] = MODE_SELECT; 1811 cmd[4] = len; 1812 } 1813 1814 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 1815 sshdr, timeout, retries); 1816 kfree(real_buffer); 1817 return ret; 1818} 1819EXPORT_SYMBOL_GPL(scsi_mode_select); 1820 1821/** 1822 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 1823 * @sdev: SCSI device to be queried 1824 * @dbd: set if mode sense will allow block descriptors to be returned 1825 * @modepage: mode page being requested 1826 * @buffer: request buffer (may not be smaller than eight bytes) 1827 * @len: length of request buffer. 1828 * @timeout: command timeout 1829 * @retries: number of retries before failing 1830 * @data: returns a structure abstracting the mode header data 1831 * @sshdr: place to put sense data (or NULL if no sense to be collected). 1832 * must be SCSI_SENSE_BUFFERSIZE big. 1833 * 1834 * Returns zero if unsuccessful, or the header offset (either 4 1835 * or 8 depending on whether a six or ten byte command was 1836 * issued) if successful. 1837 */ 1838int 1839scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 1840 unsigned char *buffer, int len, int timeout, int retries, 1841 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 1842{ 1843 unsigned char cmd[12]; 1844 int use_10_for_ms; 1845 int header_length; 1846 int result; 1847 struct scsi_sense_hdr my_sshdr; 1848 1849 memset(data, 0, sizeof(*data)); 1850 memset(&cmd[0], 0, 12); 1851 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 1852 cmd[2] = modepage; 1853 1854 /* caller might not be interested in sense, but we need it */ 1855 if (!sshdr) 1856 sshdr = &my_sshdr; 1857 1858 retry: 1859 use_10_for_ms = sdev->use_10_for_ms; 1860 1861 if (use_10_for_ms) { 1862 if (len < 8) 1863 len = 8; 1864 1865 cmd[0] = MODE_SENSE_10; 1866 cmd[8] = len; 1867 header_length = 8; 1868 } else { 1869 if (len < 4) 1870 len = 4; 1871 1872 cmd[0] = MODE_SENSE; 1873 cmd[4] = len; 1874 header_length = 4; 1875 } 1876 1877 memset(buffer, 0, len); 1878 1879 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 1880 sshdr, timeout, retries); 1881 1882 /* This code looks awful: what it's doing is making sure an 1883 * ILLEGAL REQUEST sense return identifies the actual command 1884 * byte as the problem. 
MODE_SENSE commands can return 1885 * ILLEGAL REQUEST if the code page isn't supported */ 1886 1887 if (use_10_for_ms && !scsi_status_is_good(result) && 1888 (driver_byte(result) & DRIVER_SENSE)) { 1889 if (scsi_sense_valid(sshdr)) { 1890 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 1891 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 1892 /* 1893 * Invalid command operation code 1894 */ 1895 sdev->use_10_for_ms = 0; 1896 goto retry; 1897 } 1898 } 1899 } 1900 1901 if(scsi_status_is_good(result)) { 1902 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && 1903 (modepage == 6 || modepage == 8))) { 1904 /* Initio breakage? */ 1905 header_length = 0; 1906 data->length = 13; 1907 data->medium_type = 0; 1908 data->device_specific = 0; 1909 data->longlba = 0; 1910 data->block_descriptor_length = 0; 1911 } else if(use_10_for_ms) { 1912 data->length = buffer[0]*256 + buffer[1] + 2; 1913 data->medium_type = buffer[2]; 1914 data->device_specific = buffer[3]; 1915 data->longlba = buffer[4] & 0x01; 1916 data->block_descriptor_length = buffer[6]*256 1917 + buffer[7]; 1918 } else { 1919 data->length = buffer[0] + 1; 1920 data->medium_type = buffer[1]; 1921 data->device_specific = buffer[2]; 1922 data->block_descriptor_length = buffer[3]; 1923 } 1924 data->header_length = header_length; 1925 } 1926 1927 return result; 1928} 1929EXPORT_SYMBOL(scsi_mode_sense); 1930 1931/** 1932 * scsi_test_unit_ready - test if unit is ready 1933 * @sdev: scsi device to change the state of. 1934 * @timeout: command timeout 1935 * @retries: number of retries before failing 1936 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for 1937 * returning sense. Make sure that this is cleared before passing 1938 * in. 1939 * 1940 * Returns zero if unsuccessful or an error if TUR failed. For 1941 * removable media, a return of NOT_READY or UNIT_ATTENTION is 1942 * translated to success, with the ->changed flag updated. 1943 **/ 1944int 1945scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, 1946 struct scsi_sense_hdr *sshdr_external) 1947{ 1948 char cmd[] = { 1949 TEST_UNIT_READY, 0, 0, 0, 0, 0, 1950 }; 1951 struct scsi_sense_hdr *sshdr; 1952 int result; 1953 1954 if (!sshdr_external) 1955 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 1956 else 1957 sshdr = sshdr_external; 1958 1959 /* try to eat the UNIT_ATTENTION if there are enough retries */ 1960 do { 1961 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 1962 timeout, retries); 1963 } while ((driver_byte(result) & DRIVER_SENSE) && 1964 sshdr && sshdr->sense_key == UNIT_ATTENTION && 1965 --retries); 1966 1967 if (!sshdr) 1968 /* could not allocate sense buffer, so can't process it */ 1969 return result; 1970 1971 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) { 1972 1973 if ((scsi_sense_valid(sshdr)) && 1974 ((sshdr->sense_key == UNIT_ATTENTION) || 1975 (sshdr->sense_key == NOT_READY))) { 1976 sdev->changed = 1; 1977 result = 0; 1978 } 1979 } 1980 if (!sshdr_external) 1981 kfree(sshdr); 1982 return result; 1983} 1984EXPORT_SYMBOL(scsi_test_unit_ready); 1985 1986/** 1987 * scsi_device_set_state - Take the given device through the device state model. 1988 * @sdev: scsi device to change the state of. 1989 * @state: state to change to. 1990 * 1991 * Returns zero if unsuccessful or an error if the requested 1992 * transition is illegal. 
1985
1986/**
1987 * scsi_device_set_state - Take the given device through the device state model.
1988 * @sdev: scsi device to change the state of.
1989 * @state: state to change to.
1990 *
1991 * Returns zero if successful, or an error if the requested
1992 * transition is illegal.
1993 */
1994int
1995scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1996{
1997 enum scsi_device_state oldstate = sdev->sdev_state;
1998
1999 if (state == oldstate)
2000 return 0;
2001
2002 switch (state) {
2003 case SDEV_CREATED:
2004 /* There are no legal states that come back to
2005 * created. This is the manually initialised start
2006 * state */
2007 goto illegal;
2008
2009 case SDEV_RUNNING:
2010 switch (oldstate) {
2011 case SDEV_CREATED:
2012 case SDEV_OFFLINE:
2013 case SDEV_QUIESCE:
2014 case SDEV_BLOCK:
2015 break;
2016 default:
2017 goto illegal;
2018 }
2019 break;
2020
2021 case SDEV_QUIESCE:
2022 switch (oldstate) {
2023 case SDEV_RUNNING:
2024 case SDEV_OFFLINE:
2025 break;
2026 default:
2027 goto illegal;
2028 }
2029 break;
2030
2031 case SDEV_OFFLINE:
2032 switch (oldstate) {
2033 case SDEV_CREATED:
2034 case SDEV_RUNNING:
2035 case SDEV_QUIESCE:
2036 case SDEV_BLOCK:
2037 break;
2038 default:
2039 goto illegal;
2040 }
2041 break;
2042
2043 case SDEV_BLOCK:
2044 switch (oldstate) {
2045 case SDEV_CREATED:
2046 case SDEV_RUNNING:
2047 break;
2048 default:
2049 goto illegal;
2050 }
2051 break;
2052
2053 case SDEV_CANCEL:
2054 switch (oldstate) {
2055 case SDEV_CREATED:
2056 case SDEV_RUNNING:
2057 case SDEV_QUIESCE:
2058 case SDEV_OFFLINE:
2059 case SDEV_BLOCK:
2060 break;
2061 default:
2062 goto illegal;
2063 }
2064 break;
2065
2066 case SDEV_DEL:
2067 switch (oldstate) {
2068 case SDEV_CREATED:
2069 case SDEV_RUNNING:
2070 case SDEV_OFFLINE:
2071 case SDEV_CANCEL:
2072 break;
2073 default:
2074 goto illegal;
2075 }
2076 break;
2077
2078 }
2079 sdev->sdev_state = state;
2080 return 0;
2081
2082 illegal:
2083 SCSI_LOG_ERROR_RECOVERY(1,
2084 sdev_printk(KERN_ERR, sdev,
2085 "Illegal state transition %s->%s\n",
2086 scsi_device_state_name(oldstate),
2087 scsi_device_state_name(state))
2088 );
2089 return -EINVAL;
2090}
2091EXPORT_SYMBOL(scsi_device_set_state);
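/*
 * Example usage (a minimal sketch): callers simply request the target
 * state and treat -EINVAL as "not a legal transition from the current
 * state".  Taking a device offline, for instance ("sdev" is assumed to
 * be a valid scsi_device):
 *
 *     if (scsi_device_set_state(sdev, SDEV_OFFLINE))
 *             sdev_printk(KERN_WARNING, sdev,
 *                         "cannot go offline from %s\n",
 *                         scsi_device_state_name(sdev->sdev_state));
 */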
2092
2093/**
2094 * scsi_evt_emit - emit a single SCSI device uevent
2095 * @sdev: associated SCSI device
2096 * @evt: event to emit
2097 *
2098 * Send a single uevent (scsi_event) to the associated scsi_device.
2099 */
2100static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2101{
2102 int idx = 0;
2103 char *envp[3];
2104
2105 switch (evt->evt_type) {
2106 case SDEV_EVT_MEDIA_CHANGE:
2107 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2108 break;
2109
2110 default:
2111 /* do nothing */
2112 break;
2113 }
2114
2115 envp[idx++] = NULL;
2116
2117 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2118}
2119
2120/**
2121 * scsi_evt_thread - send a uevent for each scsi event
2122 * @work: work struct for scsi_device
2123 *
2124 * Dispatch queued events to their associated scsi_device kobjects
2125 * as uevents.
2126 */
2127void scsi_evt_thread(struct work_struct *work)
2128{
2129 struct scsi_device *sdev;
2130 LIST_HEAD(event_list);
2131
2132 sdev = container_of(work, struct scsi_device, event_work);
2133
2134 while (1) {
2135 struct scsi_event *evt;
2136 struct list_head *this, *tmp;
2137 unsigned long flags;
2138
2139 spin_lock_irqsave(&sdev->list_lock, flags);
2140 list_splice_init(&sdev->event_list, &event_list);
2141 spin_unlock_irqrestore(&sdev->list_lock, flags);
2142
2143 if (list_empty(&event_list))
2144 break;
2145
2146 list_for_each_safe(this, tmp, &event_list) {
2147 evt = list_entry(this, struct scsi_event, node);
2148 list_del(&evt->node);
2149 scsi_evt_emit(sdev, evt);
2150 kfree(evt);
2151 }
2152 }
2153}
2154
2155/**
2156 * sdev_evt_send - send asserted event to uevent thread
2157 * @sdev: scsi_device event occurred on
2158 * @evt: event to send
2159 *
2160 * Assert scsi device event asynchronously.
2161 */
2162void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2163{
2164 unsigned long flags;
2165
2166 if (!test_bit(evt->evt_type, sdev->supported_events)) {
2167 kfree(evt);
2168 return;
2169 }
2170
2171 spin_lock_irqsave(&sdev->list_lock, flags);
2172 list_add_tail(&evt->node, &sdev->event_list);
2173 schedule_work(&sdev->event_work);
2174 spin_unlock_irqrestore(&sdev->list_lock, flags);
2175}
2176EXPORT_SYMBOL_GPL(sdev_evt_send);
2177
2178/**
2179 * sdev_evt_alloc - allocate a new scsi event
2180 * @evt_type: type of event to allocate
2181 * @gfpflags: GFP flags for allocation
2182 *
2183 * Allocates and returns a new scsi_event.
2184 */
2185struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2186 gfp_t gfpflags)
2187{
2188 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2189 if (!evt)
2190 return NULL;
2191
2192 evt->evt_type = evt_type;
2193 INIT_LIST_HEAD(&evt->node);
2194
2195 /* evt_type-specific initialization, if any */
2196 switch (evt_type) {
2197 case SDEV_EVT_MEDIA_CHANGE:
2198 default:
2199 /* do nothing */
2200 break;
2201 }
2202
2203 return evt;
2204}
2205EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2206
2207/**
2208 * sdev_evt_send_simple - send asserted event to uevent thread
2209 * @sdev: scsi_device event occurred on
2210 * @evt_type: type of event to send
2211 * @gfpflags: GFP flags for allocation
2212 *
2213 * Assert scsi device event asynchronously, given an event type.
2214 */
2215void sdev_evt_send_simple(struct scsi_device *sdev,
2216 enum scsi_device_event evt_type, gfp_t gfpflags)
2217{
2218 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2219 if (!evt) {
2220 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2221 evt_type);
2222 return;
2223 }
2224
2225 sdev_evt_send(sdev, evt);
2226}
2227EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
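/*
 * Example usage (a minimal sketch): a driver that notices a medium change
 * from completion (possibly interrupt) context asserts the event with
 * GFP_ATOMIC and lets scsi_evt_thread() deliver the uevent later from
 * process context.  "sdev" is an assumption of the sketch.
 *
 *     sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *
 * The event is dropped with a message if the allocation fails, and is
 * dropped silently if SDEV_EVT_MEDIA_CHANGE is not set in
 * sdev->supported_events.
 */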
2228
2229/**
2230 * scsi_device_quiesce - Block user issued commands.
2231 * @sdev: scsi device to quiesce.
2232 *
2233 * This works by trying to transition to the SDEV_QUIESCE state
2234 * (which must be a legal transition). When the device is in this
2235 * state, only special requests will be accepted, all others will
2236 * be deferred. Since special requests may also be requeued requests,
2237 * a successful return doesn't guarantee the device will be
2238 * totally quiescent.
2239 *
2240 * Must be called with user context, may sleep.
2241 *
2242 * Returns zero if successful, or an error if not.
2243 */
2244int
2245scsi_device_quiesce(struct scsi_device *sdev)
2246{
2247 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2248 if (err)
2249 return err;
2250
2251 scsi_run_queue(sdev->request_queue);
2252 while (sdev->device_busy) {
2253 msleep_interruptible(200);
2254 scsi_run_queue(sdev->request_queue);
2255 }
2256 return 0;
2257}
2258EXPORT_SYMBOL(scsi_device_quiesce);
2259
2260/**
2261 * scsi_device_resume - Restart user issued commands to a quiesced device.
2262 * @sdev: scsi device to resume.
2263 *
2264 * Moves the device from quiesced back to running and restarts the
2265 * queues.
2266 *
2267 * Must be called with user context, may sleep.
2268 */
2269void
2270scsi_device_resume(struct scsi_device *sdev)
2271{
2272 if (scsi_device_set_state(sdev, SDEV_RUNNING))
2273 return;
2274 scsi_run_queue(sdev->request_queue);
2275}
2276EXPORT_SYMBOL(scsi_device_resume);
2277
2278static void
2279device_quiesce_fn(struct scsi_device *sdev, void *data)
2280{
2281 scsi_device_quiesce(sdev);
2282}
2283
2284void
2285scsi_target_quiesce(struct scsi_target *starget)
2286{
2287 starget_for_each_device(starget, NULL, device_quiesce_fn);
2288}
2289EXPORT_SYMBOL(scsi_target_quiesce);
2290
2291static void
2292device_resume_fn(struct scsi_device *sdev, void *data)
2293{
2294 scsi_device_resume(sdev);
2295}
2296
2297void
2298scsi_target_resume(struct scsi_target *starget)
2299{
2300 starget_for_each_device(starget, NULL, device_resume_fn);
2301}
2302EXPORT_SYMBOL(scsi_target_resume);
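/*
 * Example usage (a minimal sketch): bracketing a maintenance operation
 * with quiesce/resume from process context, since scsi_device_quiesce()
 * may sleep while it waits for outstanding commands to drain.  "sdev"
 * is an assumption of the sketch.
 *
 *     err = scsi_device_quiesce(sdev);
 *     if (err)
 *             return err;
 *     ... issue only special/internal requests here ...
 *     scsi_device_resume(sdev);
 */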
2303
2304/**
2305 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2306 * @sdev: device to block
2307 *
2308 * Block request made by scsi lld's to temporarily stop all
2309 * scsi commands on the specified device. Called from interrupt
2310 * or normal process context.
2311 *
2312 * Returns zero if successful or error if not
2313 *
2314 * Notes:
2315 * This routine transitions the device to the SDEV_BLOCK state
2316 * (which must be a legal transition). When the device is in this
2317 * state, all commands are deferred until the scsi lld reenables
2318 * the device with scsi_device_unblock or device_block_tmo fires.
2319 * This routine assumes the host_lock is held on entry.
2320 */
2321int
2322scsi_internal_device_block(struct scsi_device *sdev)
2323{
2324 struct request_queue *q = sdev->request_queue;
2325 unsigned long flags;
2326 int err = 0;
2327
2328 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2329 if (err)
2330 return err;
2331
2332 /*
2333 * The device has transitioned to SDEV_BLOCK. Stop the
2334 * block layer from calling the midlayer with this device's
2335 * request queue.
2336 */
2337 spin_lock_irqsave(q->queue_lock, flags);
2338 blk_stop_queue(q);
2339 spin_unlock_irqrestore(q->queue_lock, flags);
2340
2341 return 0;
2342}
2343EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2344
2345/**
2346 * scsi_internal_device_unblock - resume a device after a block request
2347 * @sdev: device to resume
2348 *
2349 * Called by scsi lld's or the midlayer to restart the device queue
2350 * for the previously suspended scsi device. Called from interrupt or
2351 * normal process context.
2352 *
2353 * Returns zero if successful or error if not.
2354 *
2355 * Notes:
2356 * This routine transitions the device to the SDEV_RUNNING state
2357 * (which must be a legal transition) allowing the midlayer to
2358 * goose the queue for this device. This routine assumes the
2359 * host_lock is held upon entry.
2360 */
2361int
2362scsi_internal_device_unblock(struct scsi_device *sdev)
2363{
2364 struct request_queue *q = sdev->request_queue;
2365 int err;
2366 unsigned long flags;
2367
2368 /*
2369 * Try to transition the scsi device to SDEV_RUNNING
2370 * and goose the device queue if successful.
2371 */
2372 err = scsi_device_set_state(sdev, SDEV_RUNNING);
2373 if (err)
2374 return err;
2375
2376 spin_lock_irqsave(q->queue_lock, flags);
2377 blk_start_queue(q);
2378 spin_unlock_irqrestore(q->queue_lock, flags);
2379
2380 return 0;
2381}
2382EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2383
2384static void
2385device_block(struct scsi_device *sdev, void *data)
2386{
2387 scsi_internal_device_block(sdev);
2388}
2389
2390static int
2391target_block(struct device *dev, void *data)
2392{
2393 if (scsi_is_target_device(dev))
2394 starget_for_each_device(to_scsi_target(dev), NULL,
2395 device_block);
2396 return 0;
2397}
2398
2399void
2400scsi_target_block(struct device *dev)
2401{
2402 if (scsi_is_target_device(dev))
2403 starget_for_each_device(to_scsi_target(dev), NULL,
2404 device_block);
2405 else
2406 device_for_each_child(dev, NULL, target_block);
2407}
2408EXPORT_SYMBOL_GPL(scsi_target_block);
2409
2410static void
2411device_unblock(struct scsi_device *sdev, void *data)
2412{
2413 scsi_internal_device_unblock(sdev);
2414}
2415
2416static int
2417target_unblock(struct device *dev, void *data)
2418{
2419 if (scsi_is_target_device(dev))
2420 starget_for_each_device(to_scsi_target(dev), NULL,
2421 device_unblock);
2422 return 0;
2423}
2424
2425void
2426scsi_target_unblock(struct device *dev)
2427{
2428 if (scsi_is_target_device(dev))
2429 starget_for_each_device(to_scsi_target(dev), NULL,
2430 device_unblock);
2431 else
2432 device_for_each_child(dev, NULL, target_unblock);
2433}
2434EXPORT_SYMBOL_GPL(scsi_target_unblock);
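/*
 * Example usage (a minimal sketch): a transport class that temporarily
 * loses a remote port can block the whole target and unblock it when the
 * port returns.  The "rport" device here follows the FC transport, which
 * is one caller of these helpers, and is an assumption of the sketch.
 *
 *     scsi_target_block(&rport->dev);
 *     ... port comes back ...
 *     scsi_target_unblock(&rport->dev);
 */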
2435
2436/**
2437 * scsi_kmap_atomic_sg - find and atomically map an sg element
2438 * @sgl: scatter-gather list
2439 * @sg_count: number of segments in sg
2440 * @offset: offset in bytes into sg, on return offset into the mapped area
2441 * @len: bytes to map, on return number of bytes mapped
2442 *
2443 * Returns virtual address of the start of the mapped page
2444 */
2445void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2446 size_t *offset, size_t *len)
2447{
2448 int i;
2449 size_t sg_len = 0, len_complete = 0;
2450 struct scatterlist *sg;
2451 struct page *page;
2452
2453 WARN_ON(!irqs_disabled());
2454
2455 for_each_sg(sgl, sg, sg_count, i) {
2456 len_complete = sg_len; /* Complete sg-entries */
2457 sg_len += sg->length;
2458 if (sg_len > *offset)
2459 break;
2460 }
2461
2462 if (unlikely(i == sg_count)) {
2463 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2464 "elements %d\n",
2465 __FUNCTION__, sg_len, *offset, sg_count);
2466 WARN_ON(1);
2467 return NULL;
2468 }
2469
2470 /* Offset starting from the beginning of first page in this sg-entry */
2471 *offset = *offset - len_complete + sg->offset;
2472
2473 /* Assumption: contiguous pages can be accessed as "page + i" */
2474 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2475 *offset &= ~PAGE_MASK;
2476
2477 /* Bytes in this sg-entry from *offset to the end of the page */
2478 sg_len = PAGE_SIZE - *offset;
2479 if (*len > sg_len)
2480 *len = sg_len;
2481
2482 return kmap_atomic(page, KM_BIO_SRC_IRQ);
2483}
2484EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2485
2486/**
2487 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2488 * @virt: virtual address to be unmapped
2489 */
2490void scsi_kunmap_atomic_sg(void *virt)
2491{
2492 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2493}
2494EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
2495
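/*
 * Example usage (a minimal sketch): copying a few bytes out of a
 * command's scatter-gather list with the atomic kmap helpers, from a
 * context that already has interrupts disabled, as the WARN_ON above
 * expects.  "cmd" and "dest" are assumptions of the sketch.  On return,
 * *offset is the offset within the mapped page and *len is clamped to
 * the number of bytes actually mapped.
 *
 *     size_t offset = 0, len = 8;
 *     char *vaddr;
 *
 *     vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *                                 &offset, &len);
 *     if (vaddr) {
 *             memcpy(dest, vaddr + offset, len);
 *             scsi_kunmap_atomic_sg(vaddr);
 *     }
 */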