scsi_lib.c revision f0c0a376d0fcd4c5579ecf5e95f88387cba85211
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to the queue.
 *              reason - why we are inserting the command into the queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
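
/*
 * Editor's note -- illustrative sketch, not part of this revision: the
 * requeue path above is normally reached when a low-level driver's
 * queuecommand() hook refuses a command with one of the SCSI_MLQUEUE_*
 * codes.  struct my_hba, my_hba_busy() and my_hba_issue() below are
 * hypothetical names used only for illustration.
 */
#if 0
static int my_queuecommand(struct scsi_cmnd *cmd,
			   void (*done)(struct scsi_cmnd *))
{
	struct my_hba *hba = shost_priv(cmd->device->host);

	if (my_hba_busy(hba))
		/* mid-layer requeues the command via scsi_queue_insert() */
		return SCSI_MLQUEUE_HOST_BUSY;

	/* issue to hardware; call done(cmd) from the completion path */
	return my_hba_issue(hba, cmd, done);
}
#endif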

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
		memset(buffer + (bufflen - req->data_len), 0, req->data_len);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
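
/*
 * Editor's note -- illustrative sketch, not part of this revision: a
 * typical synchronous caller.  The wrapper function, buffer length and
 * 10-second timeout are assumptions used only for illustration.
 */
#if 0
static int my_inquiry(struct scsi_device *sdev, unsigned char *data,
		      unsigned len)
{
	/* len must fit in one byte for a 6-byte INQUIRY CDB */
	unsigned char cmd[] = { INQUIRY, 0, 0, 0, len, 0 };
	int result;

	/* timeout is in jiffies; retry up to three times */
	result = scsi_execute(sdev, cmd, DMA_FROM_DEVICE, data, len,
			      NULL, 10 * HZ, 3, 0);

	/* result packs the driver/host/status bytes; zero means success */
	return result ? -EIO : 0;
}
#endif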

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static struct kmem_cache *scsi_io_context_cache;

static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	return blk_rq_append_bio(q, rq, bio);
}

static void scsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sgl:	scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer.  We do not trust the scatterlist
 * sent to us, as some ULDs use that struct only to organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = bufflen, len, bytes, off;
	struct scatterlist *sg;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for_each_sg(sgl, sg, nsegs, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = bufflen;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @privdata:	data passed to done()
 * @done:	callback function when done
 * @gfp:	memory allocation flags
 */
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer,
		       unsigned bufflen, int use_sg, int timeout, int retries,
		       void *privdata, void (*done)(void *, char *, int, int),
		       gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);
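
/*
 * Editor's note -- illustrative sketch, not part of this revision: an
 * asynchronous caller supplies a completion callback instead of
 * sleeping.  struct my_ctx, my_done() and the WRITE_10 setup below are
 * assumptions used only for illustration.
 */
#if 0
static void my_done(void *data, char *sense, int result, int resid)
{
	struct my_ctx *ctx = data;

	/* runs from request completion context; do not sleep here */
	ctx->result = result;
	complete(&ctx->done);
}

static int my_send_async(struct scsi_device *sdev, void *buf, unsigned len,
			 struct my_ctx *ctx)
{
	unsigned char cmd[10] = { 0 };

	cmd[0] = WRITE_10;	/* remaining CDB bytes elided */
	return scsi_execute_async(sdev, cmd, COMMAND_SIZE(cmd[0]),
				  DMA_TO_DEVICE, buf, len, 0 /* no sg */,
				  10 * HZ, 3, ctx, my_done, GFP_KERNEL);
}
#endif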

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {

		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		/*
		 * The *queue_ready functions can add a device back onto the
		 * starved list's tail, so we must check for an infinite loop.
		 */
		if (sdev == starved_head)
			break;
		if (!starved_head)
			starved_head = sdev;

		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		list_del_init(&sdev->starved_entry);
		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
				!test_bit(QUEUE_FLAG_REENTER,
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (error && blk_noretry_request(req))
			blk_end_request(req, error, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
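
/*
 * Editor's note -- worked example for scsi_sgtable_index() above:
 * get_count_order(n) is ceil(log2(n)), so a request is rounded up to
 * the next pool size.  E.g. nents = 24 gives get_count_order(24) = 5,
 * hence index 5 - 3 = 2, i.e. "sgpool-32"; any nents <= 8 maps to
 * index 0 ("sgpool-8").
 */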

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Bidi commands must be completed as a whole, both sides at once.  If
 * part of the bytes were written and the LLD returned a residue in
 * scsi_in()->resid and/or scsi_out()->resid, this information will be
 * left in req->data_len and req->next_rq->data_len.  The upper-layer
 * driver can decide what to do with this information.
 */
static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned int dlen = req->data_len;
	unsigned int next_dlen = req->next_rq->data_len;

	req->data_len = scsi_out(cmd)->resid;
	req->next_rq->data_len = scsi_in(cmd)->resid;

	/* The req and req->next_rq have not been completed */
	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));

	scsi_release_buffers(cmd);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}
		if (scsi_bidi_cmnd(cmd)) {
			/* will also release_buffers */
			scsi_end_bidi_request(cmd);
			return;
		}
		req->data_len = scsi_get_resid(cmd);
	}

	BUG_ON(blk_bidi_rq(req)); /* bidi is not supported for !blk_pc_request yet */
	scsi_release_buffers(cmd);

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;
	this_count = blk_rq_bytes(req);

	/* good_bytes = 0, or (inclusive) there were leftovers and
	 * result = 0, so scsi_end_request couldn't retry.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, -EIO, this_count, 1);
				return;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/* This will cause a retry with a
				 * 6-byte command.
				 */
				scsi_requeue_command(q, cmd);
			} else if (sshdr.asc == 0x10) /* DIX */
				scsi_end_request(cmd, -EIO, this_count, 0);
			else
				scsi_end_request(cmd, -EIO, this_count, 1);
			return;
		case ABORTED_COMMAND:
			if (sshdr.asc == 0x10) { /* DIF */
				scsi_end_request(cmd, -EIO, this_count, 0);
				return;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->cmd_flags & REQ_QUIET))
				scsi_cmd_print_sense_hdr(cmd,
							 "Device not ready",
							 &sshdr);

			scsi_end_request(cmd, -EIO, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->cmnd);
				scsi_print_sense("", cmd);
			}
			/* See SSC3rXX or current. */
			scsi_end_request(cmd, -EIO, this_count, 1);
			return;
		default:
			break;
		}
	}
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the request and see what
		 * happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->cmd_flags & REQ_QUIET)) {
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
	}
	scsi_end_request(cmd, -EIO, this_count, !result);
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	if (blk_pc_request(req))
		sdb->length = req->data_len;
	else
		sdb->length = req->nr_sectors << 9;
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(cmd->request)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(cmd->request);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(cmd->request,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);
		BUG_ON(req->data);

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
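
/*
 * Editor's note -- illustrative sketch, not part of this revision: an
 * upper-level driver's prep_fn typically wraps the helpers above, in
 * the style of sd/sr.  my_prep_fn() and the elided CDB setup are
 * assumptions used only for illustration.
 */
#if 0
static int my_prep_fn(struct request_queue *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	int ret;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, rq);
	else if (rq->cmd_type == REQ_TYPE_FS)
		ret = scsi_setup_fs_cmnd(sdev, rq); /* then fill the CDB */
	else
		ret = BLKPREP_KILL;

	return scsi_prep_return(q, rq, ret);
}
#endif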

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}


/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					  struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else {
			blk_plug_device(sdev->request_queue);
			return 0;
		}
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry)) {
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
			return 0;
		}
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __func__);
		BUG();
	}

	/* only dereference cmd once we know it is non-NULL */
	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);

	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	/* New queue, no concurrency on queue_flags */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
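
/*
 * Editor's note -- illustrative sketch, not part of this revision: a
 * low-level driver may bracket an operation that must quiesce the host,
 * such as a firmware download, with the two calls above.
 * my_download_firmware() is an assumption used only for illustration.
 */
#if 0
	scsi_block_requests(shost);
	err = my_download_firmware(shost);
	scsi_unblock_requests(shost);	/* also reruns the device queues */
#endif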

int __init scsi_init_queue(void)
{
	int i;

	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
					sizeof(struct scsi_io_context),
					0, 0, NULL);
	if (!scsi_io_context_cache) {
		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
		return -ENOMEM;
	}

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		goto cleanup_io_context;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);
cleanup_io_context:
	kmem_cache_destroy(scsi_io_context_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_io_context_cache);
	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to 6 bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful, with the header offset (either 4 or 8,
 *	depending on whether a six or ten byte command was issued) stored
 *	in @data->header_length.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
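
/*
 * Editor's note -- illustrative sketch, not part of this revision:
 * reading the caching mode page (0x08), roughly in the style of sd.
 * The buffer size, timeout and retry count are assumptions used only
 * for illustration.
 */
#if 0
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[192];
	int res;

	res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buf, sizeof(buf),
			      30 * HZ, 3, &data, &sshdr);
	if (scsi_status_is_good(res)) {
		/* the mode page starts after header and block descriptors */
		unsigned char *page = buf + data.header_length +
				      data.block_descriptor_length;
		/* inspect page[] here */
	}
#endif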

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if successful, or an error if the TUR failed.  For
 *	removable media, a return of NOT_READY or UNIT_ATTENTION is
 *	translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries);
	} while ((driver_byte(result) & DRIVER_SENSE) &&
		 sshdr && sshdr->sense_key == UNIT_ATTENTION &&
		 --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

		if ((scsi_sense_valid(sshdr)) &&
		    ((sshdr->sense_key == UNIT_ATTENTION) ||
		     (sshdr->sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
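
/*
 * Editor's note -- illustrative sketch, not part of this revision: a
 * typical media-presence poll.  The timeout and retry count are
 * assumptions used only for illustration.
 */
#if 0
	struct scsi_sense_hdr sshdr = { };	/* must be zeroed before use */

	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0) {
		/* unit ready; for removable media also check sdev->changed */
	}
#endif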
/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to test
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense.  Make sure that this is cleared before
 *		passing in.
 *
 *	Returns zero if successful, or an error code if the TUR failed.
 *	For removable media, a return of NOT_READY or UNIT_ATTENTION is
 *	translated to success, with the ->changed flag updated.
 */
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries);
	} while ((driver_byte(result) & DRIVER_SENSE) &&
		 sshdr && sshdr->sense_key == UNIT_ATTENTION &&
		 --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

		if ((scsi_sense_valid(sshdr)) &&
		    ((sshdr->sense_key == UNIT_ATTENTION) ||
		     (sshdr->sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful, or -EINVAL if the requested
 *	transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
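
/*
 * Usage sketch (compiled out, illustrative only): polling a removable
 * device for media.  The timeout and retry counts are arbitrary
 * illustration values; the ->changed handling follows the semantics
 * documented above for scsi_test_unit_ready().
 */
#if 0
static int example_media_present(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int res;

	memset(&sshdr, 0, sizeof(sshdr));	/* must be cleared, see above */
	res = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
	if (res)
		return 0;	/* not ready and not translated: no media */

	/* For removable media, NOT_READY/UNIT_ATTENTION came back as
	 * success with ->changed set; treat that as "media may have
	 * changed" rather than "media present". */
	return !sdev->changed;
}
#endif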
/**
 *	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 *	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 *	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);
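
/*
 * Usage sketch (compiled out, illustrative only): an LLD that detects a
 * media change out of band can assert the event itself; this is the
 * same alloc-then-send pairing sdev_evt_send_simple() below performs,
 * minus the error printk.  GFP_ATOMIC is an assumption for callers in
 * atomic context.
 */
#if 0
static void example_assert_media_change(struct scsi_device *sdev)
{
	struct scsi_event *evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE,
						GFP_ATOMIC);
	if (!evt)
		return;		/* event silently dropped on OOM */
	sdev_evt_send(sdev, evt);	/* queues it and kicks event_work */
}
#endif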
/**
 *	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful, or an error if the state transition
 *	failed.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
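
/*
 * Usage sketch (compiled out, illustrative only): the quiesce/resume
 * pair brackets work that must not race with user I/O.  While quiesced,
 * special requests (e.g. those issued via scsi_execute_req()) still get
 * through, which is what makes this useful for in-band maintenance.
 */
#if 0
static int example_with_quiesced_device(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);
	if (err)
		return err;

	/* ... issue special requests with scsi_execute_req() here ... */

	scsi_device_resume(sdev);
	return 0;
}
#endif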
/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 *	Block request made by SCSI LLDs to temporarily stop all
 *	scsi commands on the specified device.  Called from interrupt
 *	or normal process context.
 *
 *	Returns zero if successful or error if not
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the LLD reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 *	Called by SCSI LLDs or the midlayer to restart the device queue
 *	for the previously suspended scsi device.  Called from interrupt or
 *	normal process context.
 *
 *	Returns zero if successful or error if not.
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED);

		if (err)
			return err;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
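
/*
 * Usage sketch (compiled out, illustrative only): a transport class
 * that notices a temporary link loss can block every device on the
 * target until the link returns; scsi_target_block()/unblock() accept
 * either a target or a parent device and fan out accordingly.
 */
#if 0
static void example_link_down(struct scsi_target *starget)
{
	scsi_target_block(&starget->dev);
}

static void example_link_up(struct scsi_target *starget)
{
	scsi_target_unblock(&starget->dev);
}
#endif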
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
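
/*
 * Usage sketch (compiled out, illustrative only): copying a few bytes
 * out of a command's scatterlist from completion context.  Note the
 * atomic-mapping constraints documented above: interrupts must be
 * disabled, only the single page containing @offset is mapped, and
 * *len comes back clamped to the page boundary, so callers wanting a
 * larger range must loop.
 */
#if 0
static void example_peek_sg(struct scsi_cmnd *cmd, char *dst, size_t count)
{
	size_t offset = 0, len = count;
	char *vaddr;

	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				    &offset, &len);
	if (!vaddr)
		return;
	memcpy(dst, vaddr + offset, len);	/* len may be < count */
	scsi_kunmap_atomic_sg(vaddr);
}
#endif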